Diffstat (limited to 'drivers/net')
440 files changed, 23033 insertions, 14444 deletions
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index 4cbb8b27a891..b9304a295f86 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -357,6 +357,14 @@ static u8 __get_duplex(struct port *port)
 	return retval;
 }
 
+static void __ad_actor_update_port(struct port *port)
+{
+	const struct bonding *bond = bond_get_bond_by_slave(port->slave);
+
+	port->actor_system = BOND_AD_INFO(bond).system.sys_mac_addr;
+	port->actor_system_priority = BOND_AD_INFO(bond).system.sys_priority;
+}
+
 /* Conversions */
 
 /**
@@ -1963,9 +1971,7 @@ void bond_3ad_bind_slave(struct slave *slave)
 		port->actor_admin_port_key = bond->params.ad_user_port_key << 6;
 		ad_update_actor_keys(port, false);
 		/* actor system is the bond's system */
-		port->actor_system = BOND_AD_INFO(bond).system.sys_mac_addr;
-		port->actor_system_priority =
-			BOND_AD_INFO(bond).system.sys_priority;
+		__ad_actor_update_port(port);
 
 		/* tx timer(to verify that no more than MAX_TX_IN_SECOND
 		 * lacpdu's are sent in one second)
 		 */
@@ -2148,6 +2154,38 @@ out:
 }
 
 /**
+ * bond_3ad_update_ad_actor_settings - reflect change of actor settings to ports
+ * @bond: bonding struct to work on
+ *
+ * If an ad_actor setting gets changed we need to update the individual port
+ * settings so the bond device will use the new values when it gets upped.
+ */
+void bond_3ad_update_ad_actor_settings(struct bonding *bond)
+{
+	struct list_head *iter;
+	struct slave *slave;
+
+	ASSERT_RTNL();
+
+	BOND_AD_INFO(bond).system.sys_priority = bond->params.ad_actor_sys_prio;
+	if (is_zero_ether_addr(bond->params.ad_actor_system))
+		BOND_AD_INFO(bond).system.sys_mac_addr =
+		    *((struct mac_addr *)bond->dev->dev_addr);
+	else
+		BOND_AD_INFO(bond).system.sys_mac_addr =
+		    *((struct mac_addr *)bond->params.ad_actor_system);
+
+	spin_lock_bh(&bond->mode_lock);
+	bond_for_each_slave(bond, slave, iter) {
+		struct port *port = &(SLAVE_AD_INFO(slave))->port;
+
+		__ad_actor_update_port(port);
+		port->ntt = true;
+	}
+	spin_unlock_bh(&bond->mode_lock);
+}
+
+/**
  * bond_3ad_state_machine_handler - handle state machines timeout
  * @bond: bonding struct to work on
  *
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index bb9e9fc45e1b..c5ac160a8ae9 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -159,7 +159,7 @@ static int tlb_initialize(struct bonding *bond)
 
 	new_hashtbl = kzalloc(size, GFP_KERNEL);
 	if (!new_hashtbl)
-		return -1;
+		return -ENOMEM;
 
 	spin_lock_bh(&bond->mode_lock);
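Together with the bond_options hunks further below (which drop BOND_OPTFLAG_IFDOWN), bond_3ad_update_ad_actor_settings() lets the 802.3ad actor settings be changed while the bond is up. As a usage illustration only, here is a minimal user-space sketch that writes the standard bonding sysfs attribute; the interface name bond0 and the priority value 65535 are assumptions, not part of this series:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Sketch: change the 802.3ad actor system priority of a running bond
 * through the standard bonding sysfs layout. "bond0" is assumed.
 */
int main(void)
{
	const char *attr = "/sys/class/net/bond0/bonding/ad_actor_sys_prio";
	const char *val = "65535";
	int fd = open(attr, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Before this series, this write was rejected while bond0 was up */
	if (write(fd, val, strlen(val)) < 0)
		perror("write");
	close(fd);
	return 0;
}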
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index cab99fd44c8e..45bdd87d6b7a 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -618,8 +618,8 @@ static void bond_hw_addr_swap(struct bonding *bond, struct slave *new_active,
 static void bond_set_dev_addr(struct net_device *bond_dev,
 			      struct net_device *slave_dev)
 {
-	netdev_dbg(bond_dev, "bond_dev=%p slave_dev=%p slave_dev->addr_len=%d\n",
-		   bond_dev, slave_dev, slave_dev->addr_len);
+	netdev_dbg(bond_dev, "bond_dev=%p slave_dev=%p slave_dev->name=%s slave_dev->addr_len=%d\n",
+		   bond_dev, slave_dev, slave_dev->name, slave_dev->addr_len);
 	memcpy(bond_dev->dev_addr, slave_dev->dev_addr, slave_dev->addr_len);
 	bond_dev->addr_assign_type = NET_ADDR_STOLEN;
 	call_netdevice_notifiers(NETDEV_CHANGEADDR, bond_dev);
@@ -928,11 +928,10 @@ void bond_select_active_slave(struct bonding *bond)
 		if (!rv)
 			return;
 
-		if (netif_carrier_ok(bond->dev)) {
+		if (netif_carrier_ok(bond->dev))
 			netdev_info(bond->dev, "first active interface up!\n");
-		} else {
+		else
 			netdev_info(bond->dev, "now running without any active interface!\n");
-		}
 	}
 }
@@ -1178,9 +1177,8 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
 		}
 	}
 
-	if (bond_should_deliver_exact_match(skb, slave, bond)) {
+	if (bond_should_deliver_exact_match(skb, slave, bond))
 		return RX_HANDLER_EXACT;
-	}
 
 	skb->dev = bond->dev;
@@ -1226,7 +1224,6 @@ static int bond_master_upper_dev_link(struct bonding *bond, struct slave *slave)
 					    &lag_upper_info);
 	if (err)
 		return err;
-	slave->dev->flags |= IFF_SLAVE;
 	rtmsg_ifinfo(RTM_NEWLINK, slave->dev, IFF_SLAVE, GFP_KERNEL);
 	return 0;
 }
@@ -1242,7 +1239,7 @@ static struct slave *bond_alloc_slave(struct bonding *bond)
 {
 	struct slave *slave = NULL;
 
-	slave = kzalloc(sizeof(struct slave), GFP_KERNEL);
+	slave = kzalloc(sizeof(*slave), GFP_KERNEL);
 	if (!slave)
 		return NULL;
@@ -1382,8 +1379,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
 	if (slave_dev->flags & IFF_UP) {
 		netdev_err(bond_dev, "%s is up - this may be due to an out of date ifenslave\n",
 			   slave_dev->name);
-		res = -EPERM;
-		goto err_undo_flags;
+		return -EPERM;
 	}
 
 	/* set bonding device ether type by slave - bonding netdevices are
@@ -1403,8 +1399,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
 			res = notifier_to_errno(res);
 			if (res) {
 				netdev_err(bond_dev, "refused to change device type\n");
-				res = -EBUSY;
-				goto err_undo_flags;
+				return -EBUSY;
 			}
 
 			/* Flush unicast and multicast addresses */
@@ -1424,8 +1419,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
 	} else if (bond_dev->type != slave_dev->type) {
 		netdev_err(bond_dev, "%s ether type (%d) is different from other slaves (%d), can not enslave it\n",
 			   slave_dev->name, slave_dev->type, bond_dev->type);
-		res = -EINVAL;
-		goto err_undo_flags;
+		return -EINVAL;
 	}
 
 	if (slave_ops->ndo_set_mac_address == NULL) {
@@ -1493,6 +1487,9 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
 		}
 	}
 
+	/* set slave flag before open to prevent IPv6 addrconf */
+	slave_dev->flags |= IFF_SLAVE;
+
 	/* open the slave since the application closed it */
 	res = dev_open(slave_dev);
 	if (res) {
@@ -1758,6 +1755,7 @@ err_close:
 	dev_close(slave_dev);
 
 err_restore_mac:
+	slave_dev->flags &= ~IFF_SLAVE;
 	if (!bond->params.fail_over_mac ||
 	    BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
 		/* XXX TODO - fom follow mode needs to change master's
@@ -3306,6 +3304,7 @@ static struct rtnl_link_stats64 *bond_get_stats(struct net_device *bond_dev,
 		stats->rx_bytes += sstats->rx_bytes - pstats->rx_bytes;
 		stats->rx_errors += sstats->rx_errors - pstats->rx_errors;
 		stats->rx_dropped += sstats->rx_dropped - pstats->rx_dropped;
+		stats->rx_nohandler += sstats->rx_nohandler - pstats->rx_nohandler;
 
 		stats->tx_packets += sstats->tx_packets - pstats->tx_packets;;
 		stats->tx_bytes += sstats->tx_bytes - pstats->tx_bytes;
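One small cleanup above replaces kzalloc(sizeof(struct slave), ...) with kzalloc(sizeof(*slave), ...). The idiom ties the allocation size to the pointer rather than to a type name, so it stays correct if the type is ever changed. A stand-alone sketch of the same pattern in plain C, with calloc standing in for kzalloc and an illustrative struct:

#include <stdio.h>
#include <stdlib.h>

struct slave_info {
	int id;
	long rx_packets;
};

/* sizeof(*ptr) tracks the pointee type automatically: if slave_info
 * grows or is renamed, this allocation needs no edits.
 */
static struct slave_info *slave_info_alloc(void)
{
	struct slave_info *s = calloc(1, sizeof(*s)); /* zeroed, like kzalloc */

	return s; /* NULL on allocation failure */
}

int main(void)
{
	struct slave_info *s = slave_info_alloc();

	if (!s)
		return 1;
	printf("allocated %zu bytes\n", sizeof(*s));
	free(s);
	return 0;
}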
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index 55e93b6b6d21..577e57cad1dc 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -402,7 +402,6 @@ static const struct bond_option bond_opts[BOND_OPT_LAST] = {
 		.id = BOND_OPT_AD_ACTOR_SYS_PRIO,
 		.name = "ad_actor_sys_prio",
 		.unsuppmodes = BOND_MODE_ALL_EX(BIT(BOND_MODE_8023AD)),
-		.flags = BOND_OPTFLAG_IFDOWN,
 		.values = bond_ad_actor_sys_prio_tbl,
 		.set = bond_option_ad_actor_sys_prio_set,
 	},
@@ -410,7 +409,7 @@ static const struct bond_option bond_opts[BOND_OPT_LAST] = {
 		.id = BOND_OPT_AD_ACTOR_SYSTEM,
 		.name = "ad_actor_system",
 		.unsuppmodes = BOND_MODE_ALL_EX(BIT(BOND_MODE_8023AD)),
-		.flags = BOND_OPTFLAG_RAWVAL | BOND_OPTFLAG_IFDOWN,
+		.flags = BOND_OPTFLAG_RAWVAL,
 		.set = bond_option_ad_actor_system_set,
 	},
 	[BOND_OPT_AD_USER_PORT_KEY] = {
@@ -1392,6 +1391,8 @@ static int bond_option_ad_actor_sys_prio_set(struct bonding *bond,
 		    newval->value);
 
 	bond->params.ad_actor_sys_prio = newval->value;
+	bond_3ad_update_ad_actor_settings(bond);
+
 	return 0;
 }
@@ -1418,6 +1419,8 @@ static int bond_option_ad_actor_system_set(struct bonding *bond,
 	netdev_info(bond->dev, "Setting ad_actor_system to %pM\n", mac);
 	ether_addr_copy(bond->params.ad_actor_system, mac);
+	bond_3ad_update_ad_actor_settings(bond);
+
 	return 0;
 
 err:
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 313dbac207ee..e23c3ed737de 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -480,7 +480,7 @@ static ssize_t bonding_show_mii_status(struct device *d,
 				       char *buf)
 {
 	struct bonding *bond = to_bond(d);
-	bool active = !!rcu_access_pointer(bond->curr_active_slave);
+	bool active = netif_carrier_ok(bond->dev);
 
 	return sprintf(buf, "%s\n", active ? "up" : "down");
 }
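The mii_status change above keys the reported state off netif_carrier_ok() instead of whether a current active slave exists. From user space the closest equivalent check is the IFF_RUNNING flag, which the kernel derives from the carrier/operational state. A small sketch, with bond0 as an assumed interface name:

#include <net/if.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>

/* Sketch: query carrier state roughly the way the new mii_status does;
 * IFF_RUNNING in SIOCGIFFLAGS reflects the interface's carrier/oper state.
 */
int main(void)
{
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "bond0", IFNAMSIZ - 1);
	if (ioctl(fd, SIOCGIFFLAGS, &ifr) == 0)
		printf("mii_status: %s\n",
		       (ifr.ifr_flags & IFF_RUNNING) ? "up" : "down");
	close(fd);
	return 0;
}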
diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c
index 9fe33fc3c2b9..cf34681af4f6 100644
--- a/drivers/net/dsa/mv88e6xxx.c
+++ b/drivers/net/dsa/mv88e6xxx.c
@@ -1532,7 +1532,7 @@ int mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port,
 
 	/* no PVID with ranges, otherwise it's a bug */
 	if (pvid)
-		err = _mv88e6xxx_port_pvid_set(ds, port, vid);
+		err = _mv88e6xxx_port_pvid_set(ds, port, vlan->vid_end);
 unlock:
 	mutex_unlock(&ps->smi_mutex);
 
@@ -2163,7 +2163,8 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
 	 * database, and allow every port to egress frames on all other ports.
 	 */
 	reg = BIT(ps->num_ports) - 1; /* all ports */
-	ret = _mv88e6xxx_port_vlan_map_set(ds, port, reg & ~port);
+	reg &= ~BIT(port); /* except itself */
+	ret = _mv88e6xxx_port_vlan_map_set(ds, port, reg);
 	if (ret)
 		goto abort;
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index 1c5f3b273e6a..79e1a0282163 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -2459,8 +2459,13 @@ boomerang_interrupt(int irq, void *dev_id)
 					struct sk_buff *skb = vp->tx_skbuff[entry];
 #if DO_ZEROCOPY
 					int i;
-					for (i=0; i<=skb_shinfo(skb)->nr_frags; i++)
-							pci_unmap_single(VORTEX_PCI(vp),
+					pci_unmap_single(VORTEX_PCI(vp),
+							le32_to_cpu(vp->tx_ring[entry].frag[0].addr),
+							le32_to_cpu(vp->tx_ring[entry].frag[0].length),
+							PCI_DMA_TODEVICE);
+
+					for (i=1; i<=skb_shinfo(skb)->nr_frags; i++)
+							pci_unmap_page(VORTEX_PCI(vp),
 									le32_to_cpu(vp->tx_ring[entry].frag[i].addr),
 									le32_to_cpu(vp->tx_ring[entry].frag[i].length)&0xFFF,
 									PCI_DMA_TODEVICE);
diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
index 0443654f0339..c89b9aeeceb6 100644
--- a/drivers/net/ethernet/8390/ax88796.c
+++ b/drivers/net/ethernet/8390/ax88796.c
@@ -372,7 +372,7 @@ static int ax_mii_probe(struct net_device *dev)
 	ax->phy_dev = phy_dev;
 
 	netdev_info(dev, "PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
-		    phy_dev->drv->name, dev_name(&phy_dev->dev), phy_dev->irq);
+		    phy_dev->drv->name, phydev_name(phy_dev), phy_dev->irq);
 
 	return 0;
 }
@@ -627,7 +627,7 @@ static int ax_mii_init(struct net_device *dev)
 	struct platform_device *pdev = to_platform_device(dev->dev.parent);
 	struct ei_device *ei_local = netdev_priv(dev);
 	struct ax_device *ax = to_ax_dev(dev);
-	int err, i;
+	int err;
 
 	ax->bb_ctrl.ops = &bb_ops;
 	ax->addr_memr = ei_local->mem + AX_MEMR;
@@ -642,23 +642,12 @@ static int ax_mii_init(struct net_device *dev)
 	snprintf(ax->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
 		 pdev->name, pdev->id);
 
-	ax->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
-	if (!ax->mii_bus->irq) {
-		err = -ENOMEM;
-		goto out_free_mdio_bitbang;
-	}
-
-	for (i = 0; i < PHY_MAX_ADDR; i++)
-		ax->mii_bus->irq[i] = PHY_POLL;
-
 	err = mdiobus_register(ax->mii_bus);
 	if (err)
-		goto out_free_irq;
+		goto out_free_mdio_bitbang;
 
 	return 0;
 
- out_free_irq:
-	kfree(ax->mii_bus->irq);
 out_free_mdio_bitbang:
 	free_mdio_bitbang(ax->mii_bus);
 out:
diff --git a/drivers/net/ethernet/adi/bfin_mac.c b/drivers/net/ethernet/adi/bfin_mac.c
index e0e95a15cab0..74139cb7f849 100644
--- a/drivers/net/ethernet/adi/bfin_mac.c
+++ b/drivers/net/ethernet/adi/bfin_mac.c
@@ -380,9 +380,8 @@ static void bfin_mac_adjust_link(struct net_device *dev)
 static int mii_probe(struct net_device *dev, int phy_mode)
 {
 	struct bfin_mac_local *lp = netdev_priv(dev);
-	struct phy_device *phydev = NULL;
+	struct phy_device *phydev;
 	unsigned short sysctl;
-	int i;
 	u32 sclk, mdc_div;
 
 	/* Enable PHY output early */
@@ -396,18 +395,7 @@ static int mii_probe(struct net_device *dev, int phy_mode)
 	sysctl = (sysctl & ~MDCDIV) | SET_MDCDIV(mdc_div);
 	bfin_write_EMAC_SYSCTL(sysctl);
 
-	/* search for connected PHY device */
-	for (i = 0; i < PHY_MAX_ADDR; ++i) {
-		struct phy_device *const tmp_phydev = lp->mii_bus->phy_map[i];
-
-		if (!tmp_phydev)
-			continue; /* no PHY here... */
-
-		phydev = tmp_phydev;
-		break; /* found it */
-	}
-
-	/* now we are supposed to have a proper phydev, to attach to...
*/ + phydev = phy_find_first(lp->mii_bus); if (!phydev) { netdev_err(dev, "no phy device found\n"); return -ENODEV; @@ -419,7 +407,7 @@ static int mii_probe(struct net_device *dev, int phy_mode) return -EINVAL; } - phydev = phy_connect(dev, dev_name(&phydev->dev), + phydev = phy_connect(dev, phydev_name(phydev), &bfin_mac_adjust_link, phy_mode); if (IS_ERR(phydev)) { @@ -444,10 +432,8 @@ static int mii_probe(struct net_device *dev, int phy_mode) lp->old_duplex = -1; lp->phydev = phydev; - pr_info("attached PHY driver [%s] " - "(mii_bus:phy_addr=%s, irq=%d, mdc_clk=%dHz(mdc_div=%d)@sclk=%dMHz)\n", - phydev->drv->name, dev_name(&phydev->dev), phydev->irq, - MDC_CLK, mdc_div, sclk/1000000); + phy_attached_print(phydev, "mdc_clk=%dHz(mdc_div=%d)@sclk=%dMHz)\n", + MDC_CLK, mdc_div, sclk / 1000000); return 0; } @@ -1840,12 +1826,6 @@ static int bfin_mii_bus_probe(struct platform_device *pdev) snprintf(miibus->id, MII_BUS_ID_SIZE, "%s-%x", pdev->name, pdev->id); - miibus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL); - if (!miibus->irq) - goto out_err_irq_alloc; - - for (i = rc; i < PHY_MAX_ADDR; ++i) - miibus->irq[i] = PHY_POLL; rc = clamp(mii_bus_pd->phydev_number, 0, PHY_MAX_ADDR); if (rc != mii_bus_pd->phydev_number) @@ -1864,14 +1844,12 @@ static int bfin_mii_bus_probe(struct platform_device *pdev) rc = mdiobus_register(miibus); if (rc) { dev_err(&pdev->dev, "Cannot register MDIO bus!\n"); - goto out_err_mdiobus_register; + goto out_err_irq_alloc; } platform_set_drvdata(pdev, miibus); return 0; -out_err_mdiobus_register: - kfree(miibus->irq); out_err_irq_alloc: mdiobus_free(miibus); out_err_alloc: @@ -1887,7 +1865,6 @@ static int bfin_mii_bus_remove(struct platform_device *pdev) dev_get_platdata(&pdev->dev); mdiobus_unregister(miibus); - kfree(miibus->irq); mdiobus_free(miibus); peripheral_free_list(mii_bus_pd->mac_peripherals); diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c index 20bf55dbd76f..b873531c5575 100644 --- a/drivers/net/ethernet/aeroflex/greth.c +++ b/drivers/net/ethernet/aeroflex/greth.c @@ -1337,11 +1337,6 @@ static int greth_mdio_init(struct greth_private *greth) greth->mdio->write = greth_mdio_write; greth->mdio->priv = greth; - greth->mdio->irq = greth->mdio_irqs; - - for (phy = 0; phy < PHY_MAX_ADDR; phy++) - greth->mdio->irq[phy] = PHY_POLL; - ret = mdiobus_register(greth->mdio); if (ret) { goto error; diff --git a/drivers/net/ethernet/aeroflex/greth.h b/drivers/net/ethernet/aeroflex/greth.h index ae16ac94daf8..92dd918e4a83 100644 --- a/drivers/net/ethernet/aeroflex/greth.h +++ b/drivers/net/ethernet/aeroflex/greth.h @@ -125,7 +125,6 @@ struct greth_private { struct phy_device *phy; struct mii_bus *mdio; - int mdio_irqs[PHY_MAX_ADDR]; unsigned int link; unsigned int speed; unsigned int duplex; diff --git a/drivers/net/ethernet/agere/et131x.c b/drivers/net/ethernet/agere/et131x.c index e0f3d197e7f2..3f3bcbea15bd 100644 --- a/drivers/net/ethernet/agere/et131x.c +++ b/drivers/net/ethernet/agere/et131x.c @@ -1235,7 +1235,7 @@ static int et131x_mii_read(struct et131x_adapter *adapter, u8 reg, u16 *value) if (!phydev) return -EIO; - return et131x_phy_mii_read(adapter, phydev->addr, reg, value); + return et131x_phy_mii_read(adapter, phydev->mdio.addr, reg, value); } static int et131x_mii_write(struct et131x_adapter *adapter, u8 addr, u8 reg, @@ -1462,7 +1462,7 @@ static void et1310_phy_power_switch(struct et131x_adapter *adapter, bool down) data &= ~BMCR_PDOWN; if (down) data |= BMCR_PDOWN; - et131x_mii_write(adapter, 
phydev->addr, MII_BMCR, data); + et131x_mii_write(adapter, phydev->mdio.addr, MII_BMCR, data); } /* et131x_xcvr_init - Init the phy if we are setting it into force mode */ @@ -1490,7 +1490,7 @@ static void et131x_xcvr_init(struct et131x_adapter *adapter) else lcr2 |= (LED_VAL_LINKON << LED_TXRX_SHIFT); - et131x_mii_write(adapter, phydev->addr, PHY_LED_2, lcr2); + et131x_mii_write(adapter, phydev->mdio.addr, PHY_LED_2, lcr2); } } @@ -3192,14 +3192,14 @@ static void et131x_adjust_link(struct net_device *netdev) et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG, ®ister18); - et131x_mii_write(adapter, phydev->addr, + et131x_mii_write(adapter, phydev->mdio.addr, PHY_MPHY_CONTROL_REG, register18 | 0x4); - et131x_mii_write(adapter, phydev->addr, PHY_INDEX_REG, - register18 | 0x8402); - et131x_mii_write(adapter, phydev->addr, PHY_DATA_REG, - register18 | 511); - et131x_mii_write(adapter, phydev->addr, + et131x_mii_write(adapter, phydev->mdio.addr, + PHY_INDEX_REG, register18 | 0x8402); + et131x_mii_write(adapter, phydev->mdio.addr, + PHY_DATA_REG, register18 | 511); + et131x_mii_write(adapter, phydev->mdio.addr, PHY_MPHY_CONTROL_REG, register18); } @@ -3212,8 +3212,8 @@ static void et131x_adjust_link(struct net_device *netdev) et131x_mii_read(adapter, PHY_CONFIG, ®); reg &= ~ET_PHY_CONFIG_TX_FIFO_DEPTH; reg |= ET_PHY_CONFIG_FIFO_DEPTH_32; - et131x_mii_write(adapter, phydev->addr, PHY_CONFIG, - reg); + et131x_mii_write(adapter, phydev->mdio.addr, + PHY_CONFIG, reg); } et131x_set_rx_dma_timer(adapter); @@ -3226,14 +3226,14 @@ static void et131x_adjust_link(struct net_device *netdev) et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG, ®ister18); - et131x_mii_write(adapter, phydev->addr, + et131x_mii_write(adapter, phydev->mdio.addr, PHY_MPHY_CONTROL_REG, register18 | 0x4); - et131x_mii_write(adapter, phydev->addr, + et131x_mii_write(adapter, phydev->mdio.addr, PHY_INDEX_REG, register18 | 0x8402); - et131x_mii_write(adapter, phydev->addr, + et131x_mii_write(adapter, phydev->mdio.addr, PHY_DATA_REG, register18 | 511); - et131x_mii_write(adapter, phydev->addr, + et131x_mii_write(adapter, phydev->mdio.addr, PHY_MPHY_CONTROL_REG, register18); } @@ -3265,7 +3265,7 @@ static int et131x_mii_probe(struct net_device *netdev) return -ENODEV; } - phydev = phy_connect(netdev, dev_name(&phydev->dev), + phydev = phy_connect(netdev, phydev_name(phydev), &et131x_adjust_link, PHY_INTERFACE_MODE_MII); if (IS_ERR(phydev)) { @@ -3289,9 +3289,7 @@ static int et131x_mii_probe(struct net_device *netdev) phydev->autoneg = AUTONEG_ENABLE; adapter->phydev = phydev; - dev_info(&adapter->pdev->dev, - "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n", - phydev->drv->name, dev_name(&phydev->dev)); + phy_attached_info(phydev); return 0; } @@ -3327,7 +3325,6 @@ static void et131x_pci_remove(struct pci_dev *pdev) netif_napi_del(&adapter->napi); phy_disconnect(adapter->phydev); mdiobus_unregister(adapter->mii_bus); - kfree(adapter->mii_bus->irq); mdiobus_free(adapter->mii_bus); et131x_adapter_memory_free(adapter); @@ -3946,7 +3943,6 @@ static int et131x_pci_setup(struct pci_dev *pdev, struct net_device *netdev; struct et131x_adapter *adapter; int rc; - int ii; rc = pci_enable_device(pdev); if (rc < 0) { @@ -4036,18 +4032,11 @@ static int et131x_pci_setup(struct pci_dev *pdev, adapter->mii_bus->priv = netdev; adapter->mii_bus->read = et131x_mdio_read; adapter->mii_bus->write = et131x_mdio_write; - adapter->mii_bus->irq = kmalloc_array(PHY_MAX_ADDR, sizeof(int), - GFP_KERNEL); - if (!adapter->mii_bus->irq) - goto err_mdio_free; - - for (ii = 0; 
ii < PHY_MAX_ADDR; ii++) - adapter->mii_bus->irq[ii] = PHY_POLL; rc = mdiobus_register(adapter->mii_bus); if (rc < 0) { dev_err(&pdev->dev, "failed to register MII bus\n"); - goto err_mdio_free_irq; + goto err_mdio_free; } rc = et131x_mii_probe(netdev); @@ -4087,8 +4076,6 @@ err_phy_disconnect: phy_disconnect(adapter->phydev); err_mdio_unregister: mdiobus_unregister(adapter->mii_bus); -err_mdio_free_irq: - kfree(adapter->mii_bus->irq); err_mdio_free: mdiobus_free(adapter->mii_bus); err_mem_free: diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c index fe644823ceaf..17472851674f 100644 --- a/drivers/net/ethernet/altera/altera_tse_main.c +++ b/drivers/net/ethernet/altera/altera_tse_main.c @@ -131,7 +131,6 @@ static int altera_tse_mdio_create(struct net_device *dev, unsigned int id) { struct altera_tse_private *priv = netdev_priv(dev); int ret; - int i; struct device_node *mdio_node = NULL; struct mii_bus *mdio = NULL; struct device_node *child_node = NULL; @@ -161,14 +160,6 @@ static int altera_tse_mdio_create(struct net_device *dev, unsigned int id) mdio->write = &altera_tse_mdio_write; snprintf(mdio->id, MII_BUS_ID_SIZE, "%s-%u", mdio->name, id); - mdio->irq = kcalloc(PHY_MAX_ADDR, sizeof(int), GFP_KERNEL); - if (mdio->irq == NULL) { - ret = -ENOMEM; - goto out_free_mdio; - } - for (i = 0; i < PHY_MAX_ADDR; i++) - mdio->irq[i] = PHY_POLL; - mdio->priv = dev; mdio->parent = priv->device; @@ -176,7 +167,7 @@ static int altera_tse_mdio_create(struct net_device *dev, unsigned int id) if (ret != 0) { netdev_err(dev, "Cannot register MDIO bus %s\n", mdio->id); - goto out_free_mdio_irq; + goto out_free_mdio; } if (netif_msg_drv(priv)) @@ -184,8 +175,6 @@ static int altera_tse_mdio_create(struct net_device *dev, unsigned int id) priv->mdio = mdio; return 0; -out_free_mdio_irq: - kfree(mdio->irq); out_free_mdio: mdiobus_free(mdio); mdio = NULL; @@ -855,7 +844,7 @@ static int init_phy(struct net_device *dev) } netdev_dbg(dev, "attached to PHY %d UID 0x%08x Link = %d\n", - phydev->addr, phydev->phy_id, phydev->link); + phydev->mdio.addr, phydev->phy_id, phydev->link); priv->phydev = phydev; return 0; diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c index 5330bcb8a944..d3977d032b48 100644 --- a/drivers/net/ethernet/amd/au1000_eth.c +++ b/drivers/net/ethernet/amd/au1000_eth.c @@ -344,9 +344,6 @@ static void au1000_mdio_write(struct net_device *dev, int phy_addr, static int au1000_mdiobus_read(struct mii_bus *bus, int phy_addr, int regnum) { - /* WARNING: bus->phy_map[phy_addr].attached_dev == dev does - * _NOT_ hold (e.g. 
when PHY is accessed through other MAC's MII bus) - */ struct net_device *const dev = bus->priv; /* make sure the MAC associated with this @@ -502,7 +499,7 @@ static int au1000_mii_probe(struct net_device *dev) BUG_ON(aup->mac_id < 0 || aup->mac_id > 1); if (aup->phy_addr) - phydev = aup->mii_bus->phy_map[aup->phy_addr]; + phydev = mdiobus_get_phy(aup->mii_bus, aup->phy_addr); else netdev_info(dev, "using PHY-less setup\n"); return 0; @@ -512,8 +509,8 @@ static int au1000_mii_probe(struct net_device *dev) * on the current MAC's MII bus */ for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) - if (aup->mii_bus->phy_map[phy_addr]) { - phydev = aup->mii_bus->phy_map[phy_addr]; + if (mdiobus_get_phy(aup->mii_bus, aup->phy_addr)) { + phydev = mdiobus_get_phy(aup->mii_bus, aup->phy_addr); if (!aup->phy_search_highest_addr) /* break out with first one found */ break; @@ -531,7 +528,8 @@ static int au1000_mii_probe(struct net_device *dev) */ for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) { struct phy_device *const tmp_phydev = - aup->mii_bus->phy_map[phy_addr]; + mdiobus_get_phy(aup->mii_bus, + phy_addr); if (aup->mac_id == 1) break; @@ -558,7 +556,7 @@ static int au1000_mii_probe(struct net_device *dev) /* now we are supposed to have a proper phydev, to attach to... */ BUG_ON(phydev->attached_dev); - phydev = phy_connect(dev, dev_name(&phydev->dev), + phydev = phy_connect(dev, phydev_name(phydev), &au1000_adjust_link, PHY_INTERFACE_MODE_MII); if (IS_ERR(phydev)) { @@ -583,9 +581,7 @@ static int au1000_mii_probe(struct net_device *dev) aup->old_duplex = -1; aup->phy_dev = phydev; - netdev_info(dev, "attached PHY driver [%s] " - "(mii_bus:phy_addr=%s, irq=%d)\n", - phydev->drv->name, dev_name(&phydev->dev), phydev->irq); + phy_attached_info(phydev); return 0; } @@ -1293,14 +1289,7 @@ static int au1000_probe(struct platform_device *pdev) aup->mii_bus->name = "au1000_eth_mii"; snprintf(aup->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", pdev->name, aup->mac_id); - aup->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL); - if (aup->mii_bus->irq == NULL) { - err = -ENOMEM; - goto err_out; - } - for (i = 0; i < PHY_MAX_ADDR; ++i) - aup->mii_bus->irq[i] = PHY_POLL; /* if known, set corresponding PHY IRQs */ if (aup->phy_static_config) if (aup->phy_irq && aup->phy_busid == aup->mac_id) diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h index b6fa89102526..bbef95973c27 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h @@ -6,7 +6,7 @@ * * License 1: GPLv2 * - * Copyright (c) 2014 Advanced Micro Devices, Inc. + * Copyright (c) 2014-2016 Advanced Micro Devices, Inc. * * This file is free software; you may copy, redistribute and/or modify * it under the terms of the GNU General Public License as published by @@ -56,7 +56,7 @@ * * License 2: Modified BSD * - * Copyright (c) 2014 Advanced Micro Devices, Inc. + * Copyright (c) 2014-2016 Advanced Micro Devices, Inc. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -768,12 +768,16 @@ #define MTL_Q_TQDR 0x08 #define MTL_Q_RQOMR 0x40 #define MTL_Q_RQMPOCR 0x44 -#define MTL_Q_RQDR 0x4c +#define MTL_Q_RQDR 0x48 #define MTL_Q_RQFCR 0x50 #define MTL_Q_IER 0x70 #define MTL_Q_ISR 0x74 /* MTL queue register entry bit positions and sizes */ +#define MTL_Q_RQDR_PRXQ_INDEX 16 +#define MTL_Q_RQDR_PRXQ_WIDTH 14 +#define MTL_Q_RQDR_RXQSTS_INDEX 4 +#define MTL_Q_RQDR_RXQSTS_WIDTH 2 #define MTL_Q_RQFCR_RFA_INDEX 1 #define MTL_Q_RQFCR_RFA_WIDTH 6 #define MTL_Q_RQFCR_RFD_INDEX 17 diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c b/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c index a6b9899e285f..895d35639129 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c @@ -6,7 +6,7 @@ * * License 1: GPLv2 * - * Copyright (c) 2014 Advanced Micro Devices, Inc. + * Copyright (c) 2014-2016 Advanced Micro Devices, Inc. * * This file is free software; you may copy, redistribute and/or modify * it under the terms of the GNU General Public License as published by @@ -56,7 +56,7 @@ * * License 2: Modified BSD * - * Copyright (c) 2014 Advanced Micro Devices, Inc. + * Copyright (c) 2014-2016 Advanced Micro Devices, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -146,6 +146,7 @@ static int xgbe_dcb_ieee_setets(struct net_device *netdev, { struct xgbe_prv_data *pdata = netdev_priv(netdev); unsigned int i, tc_ets, tc_ets_weight; + u8 max_tc = 0; tc_ets = 0; tc_ets_weight = 0; @@ -157,12 +158,9 @@ static int xgbe_dcb_ieee_setets(struct net_device *netdev, netif_dbg(pdata, drv, netdev, "PRIO%u: TC=%hhu\n", i, ets->prio_tc[i]); - if ((ets->tc_tx_bw[i] || ets->tc_tsa[i]) && - (i >= pdata->hw_feat.tc_cnt)) - return -EINVAL; - - if (ets->prio_tc[i] >= pdata->hw_feat.tc_cnt) - return -EINVAL; + max_tc = max_t(u8, max_tc, ets->prio_tc[i]); + if ((ets->tc_tx_bw[i] || ets->tc_tsa[i])) + max_tc = max_t(u8, max_tc, i); switch (ets->tc_tsa[i]) { case IEEE_8021QAZ_TSA_STRICT: @@ -171,15 +169,28 @@ static int xgbe_dcb_ieee_setets(struct net_device *netdev, tc_ets = 1; tc_ets_weight += ets->tc_tx_bw[i]; break; - default: + netif_err(pdata, drv, netdev, + "unsupported TSA algorithm (%hhu)\n", + ets->tc_tsa[i]); return -EINVAL; } } + /* Check maximum traffic class requested */ + if (max_tc >= pdata->hw_feat.tc_cnt) { + netif_err(pdata, drv, netdev, + "exceeded number of supported traffic classes\n"); + return -EINVAL; + } + /* Weights must add up to 100% */ - if (tc_ets && (tc_ets_weight != 100)) + if (tc_ets && (tc_ets_weight != 100)) { + netif_err(pdata, drv, netdev, + "sum of ETS algorithm weights is not 100 (%u)\n", + tc_ets_weight); return -EINVAL; + } if (!pdata->ets) { pdata->ets = devm_kzalloc(pdata->dev, sizeof(*pdata->ets), @@ -188,6 +199,7 @@ static int xgbe_dcb_ieee_setets(struct net_device *netdev, return -ENOMEM; } + pdata->num_tcs = max_tc + 1; memcpy(pdata->ets, ets, sizeof(*pdata->ets)); pdata->hw_if.config_dcb_tc(pdata); @@ -221,6 +233,13 @@ static int xgbe_dcb_ieee_setpfc(struct net_device *netdev, "cap=%hhu, en=%#hhx, mbc=%hhu, delay=%hhu\n", pfc->pfc_cap, pfc->pfc_en, pfc->mbc, pfc->delay); + /* Check PFC for supported number of traffic classes */ + if (pfc->pfc_en & ~((1 << pdata->hw_feat.tc_cnt) - 1)) { + netif_err(pdata, drv, netdev, + "PFC requested for unsupported traffic class\n"); + return -EINVAL; + } + if (!pdata->pfc) { pdata->pfc = devm_kzalloc(pdata->dev, sizeof(*pdata->pfc), GFP_KERNEL); diff --git 
a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c index f6a7161e3b85..1babcc11a248 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c @@ -6,7 +6,7 @@ * * License 1: GPLv2 * - * Copyright (c) 2014 Advanced Micro Devices, Inc. + * Copyright (c) 2014-2016 Advanced Micro Devices, Inc. * * This file is free software; you may copy, redistribute and/or modify * it under the terms of the GNU General Public License as published by @@ -56,7 +56,7 @@ * * License 2: Modified BSD * - * Copyright (c) 2014 Advanced Micro Devices, Inc. + * Copyright (c) 2014-2016 Advanced Micro Devices, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -518,13 +518,45 @@ static int xgbe_disable_tx_flow_control(struct xgbe_prv_data *pdata) static int xgbe_enable_tx_flow_control(struct xgbe_prv_data *pdata) { + struct ieee_pfc *pfc = pdata->pfc; + struct ieee_ets *ets = pdata->ets; unsigned int max_q_count, q_count; unsigned int reg, reg_val; unsigned int i; /* Set MTL flow control */ - for (i = 0; i < pdata->rx_q_count; i++) - XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 1); + for (i = 0; i < pdata->rx_q_count; i++) { + unsigned int ehfc = 0; + + if (pfc && ets) { + unsigned int prio; + + for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++) { + unsigned int tc; + + /* Does this queue handle the priority? */ + if (pdata->prio2q_map[prio] != i) + continue; + + /* Get the Traffic Class for this priority */ + tc = ets->prio_tc[prio]; + + /* Check if flow control should be enabled */ + if (pfc->pfc_en & (1 << tc)) { + ehfc = 1; + break; + } + } + } else { + ehfc = 1; + } + + XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, ehfc); + + netif_dbg(pdata, drv, pdata->netdev, + "flow control %s for RXq%u\n", + ehfc ? "enabled" : "disabled", i); + } /* Set MAC flow control */ max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES; @@ -702,6 +734,113 @@ static int xgbe_set_xgmii_speed(struct xgbe_prv_data *pdata) return 0; } +static int xgbe_enable_rx_vlan_stripping(struct xgbe_prv_data *pdata) +{ + /* Put the VLAN tag in the Rx descriptor */ + XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLRXS, 1); + + /* Don't check the VLAN type */ + XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, DOVLTC, 1); + + /* Check only C-TAG (0x8100) packets */ + XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ERSVLM, 0); + + /* Don't consider an S-TAG (0x88A8) packet as a VLAN packet */ + XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ESVL, 0); + + /* Enable VLAN tag stripping */ + XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLS, 0x3); + + return 0; +} + +static int xgbe_disable_rx_vlan_stripping(struct xgbe_prv_data *pdata) +{ + XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLS, 0); + + return 0; +} + +static int xgbe_enable_rx_vlan_filtering(struct xgbe_prv_data *pdata) +{ + /* Enable VLAN filtering */ + XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 1); + + /* Enable VLAN Hash Table filtering */ + XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VTHM, 1); + + /* Disable VLAN tag inverse matching */ + XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VTIM, 0); + + /* Only filter on the lower 12-bits of the VLAN tag */ + XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ETV, 1); + + /* In order for the VLAN Hash Table filtering to be effective, + * the VLAN tag identifier in the VLAN Tag Register must not + * be zero. Set the VLAN tag identifier to "1" to enable the + * VLAN Hash Table filtering. This implies that a VLAN tag of + * 1 will always pass filtering. 
+ */ + XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VL, 1); + + return 0; +} + +static int xgbe_disable_rx_vlan_filtering(struct xgbe_prv_data *pdata) +{ + /* Disable VLAN filtering */ + XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 0); + + return 0; +} + +static u32 xgbe_vid_crc32_le(__le16 vid_le) +{ + u32 poly = 0xedb88320; /* CRCPOLY_LE */ + u32 crc = ~0; + u32 temp = 0; + unsigned char *data = (unsigned char *)&vid_le; + unsigned char data_byte = 0; + int i, bits; + + bits = get_bitmask_order(VLAN_VID_MASK); + for (i = 0; i < bits; i++) { + if ((i % 8) == 0) + data_byte = data[i / 8]; + + temp = ((crc & 1) ^ data_byte) & 1; + crc >>= 1; + data_byte >>= 1; + + if (temp) + crc ^= poly; + } + + return crc; +} + +static int xgbe_update_vlan_hash_table(struct xgbe_prv_data *pdata) +{ + u32 crc; + u16 vid; + __le16 vid_le; + u16 vlan_hash_table = 0; + + /* Generate the VLAN Hash Table value */ + for_each_set_bit(vid, pdata->active_vlans, VLAN_N_VID) { + /* Get the CRC32 value of the VLAN ID */ + vid_le = cpu_to_le16(vid); + crc = bitrev32(~xgbe_vid_crc32_le(vid_le)) >> 28; + + vlan_hash_table |= (1 << crc); + } + + /* Set the VLAN Hash Table filtering register */ + XGMAC_IOWRITE_BITS(pdata, MAC_VLANHTR, VLHT, vlan_hash_table); + + return 0; +} + static int xgbe_set_promiscuous_mode(struct xgbe_prv_data *pdata, unsigned int enable) { @@ -714,6 +853,14 @@ static int xgbe_set_promiscuous_mode(struct xgbe_prv_data *pdata, enable ? "entering" : "leaving"); XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, val); + /* Hardware will still perform VLAN filtering in promiscuous mode */ + if (enable) { + xgbe_disable_rx_vlan_filtering(pdata); + } else { + if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER) + xgbe_enable_rx_vlan_filtering(pdata); + } + return 0; } @@ -875,6 +1022,7 @@ static int xgbe_config_rx_mode(struct xgbe_prv_data *pdata) static int xgbe_read_mmd_regs(struct xgbe_prv_data *pdata, int prtad, int mmd_reg) { + unsigned long flags; unsigned int mmd_address; int mmd_data; @@ -892,10 +1040,10 @@ static int xgbe_read_mmd_regs(struct xgbe_prv_data *pdata, int prtad, * register offsets must therefore be adjusted by left shifting the * offset 2 bits and reading 32 bits of data. */ - mutex_lock(&pdata->xpcs_mutex); + spin_lock_irqsave(&pdata->xpcs_lock, flags); XPCS_IOWRITE(pdata, PCS_MMD_SELECT << 2, mmd_address >> 8); mmd_data = XPCS_IOREAD(pdata, (mmd_address & 0xff) << 2); - mutex_unlock(&pdata->xpcs_mutex); + spin_unlock_irqrestore(&pdata->xpcs_lock, flags); return mmd_data; } @@ -904,6 +1052,7 @@ static void xgbe_write_mmd_regs(struct xgbe_prv_data *pdata, int prtad, int mmd_reg, int mmd_data) { unsigned int mmd_address; + unsigned long flags; if (mmd_reg & MII_ADDR_C45) mmd_address = mmd_reg & ~MII_ADDR_C45; @@ -919,10 +1068,10 @@ static void xgbe_write_mmd_regs(struct xgbe_prv_data *pdata, int prtad, * register offsets must therefore be adjusted by left shifting the * offset 2 bits and reading 32 bits of data. 
*/ - mutex_lock(&pdata->xpcs_mutex); + spin_lock_irqsave(&pdata->xpcs_lock, flags); XPCS_IOWRITE(pdata, PCS_MMD_SELECT << 2, mmd_address >> 8); XPCS_IOWRITE(pdata, (mmd_address & 0xff) << 2, mmd_data); - mutex_unlock(&pdata->xpcs_mutex); + spin_unlock_irqrestore(&pdata->xpcs_lock, flags); } static int xgbe_tx_complete(struct xgbe_ring_desc *rdesc) @@ -944,116 +1093,6 @@ static int xgbe_enable_rx_csum(struct xgbe_prv_data *pdata) return 0; } -static int xgbe_enable_rx_vlan_stripping(struct xgbe_prv_data *pdata) -{ - /* Put the VLAN tag in the Rx descriptor */ - XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLRXS, 1); - - /* Don't check the VLAN type */ - XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, DOVLTC, 1); - - /* Check only C-TAG (0x8100) packets */ - XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ERSVLM, 0); - - /* Don't consider an S-TAG (0x88A8) packet as a VLAN packet */ - XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ESVL, 0); - - /* Enable VLAN tag stripping */ - XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLS, 0x3); - - return 0; -} - -static int xgbe_disable_rx_vlan_stripping(struct xgbe_prv_data *pdata) -{ - XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLS, 0); - - return 0; -} - -static int xgbe_enable_rx_vlan_filtering(struct xgbe_prv_data *pdata) -{ - /* Enable VLAN filtering */ - XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 1); - - /* Enable VLAN Hash Table filtering */ - XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VTHM, 1); - - /* Disable VLAN tag inverse matching */ - XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VTIM, 0); - - /* Only filter on the lower 12-bits of the VLAN tag */ - XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ETV, 1); - - /* In order for the VLAN Hash Table filtering to be effective, - * the VLAN tag identifier in the VLAN Tag Register must not - * be zero. Set the VLAN tag identifier to "1" to enable the - * VLAN Hash Table filtering. This implies that a VLAN tag of - * 1 will always pass filtering. 
- */ - XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VL, 1); - - return 0; -} - -static int xgbe_disable_rx_vlan_filtering(struct xgbe_prv_data *pdata) -{ - /* Disable VLAN filtering */ - XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 0); - - return 0; -} - -#ifndef CRCPOLY_LE -#define CRCPOLY_LE 0xedb88320 -#endif -static u32 xgbe_vid_crc32_le(__le16 vid_le) -{ - u32 poly = CRCPOLY_LE; - u32 crc = ~0; - u32 temp = 0; - unsigned char *data = (unsigned char *)&vid_le; - unsigned char data_byte = 0; - int i, bits; - - bits = get_bitmask_order(VLAN_VID_MASK); - for (i = 0; i < bits; i++) { - if ((i % 8) == 0) - data_byte = data[i / 8]; - - temp = ((crc & 1) ^ data_byte) & 1; - crc >>= 1; - data_byte >>= 1; - - if (temp) - crc ^= poly; - } - - return crc; -} - -static int xgbe_update_vlan_hash_table(struct xgbe_prv_data *pdata) -{ - u32 crc; - u16 vid; - __le16 vid_le; - u16 vlan_hash_table = 0; - - /* Generate the VLAN Hash Table value */ - for_each_set_bit(vid, pdata->active_vlans, VLAN_N_VID) { - /* Get the CRC32 value of the VLAN ID */ - vid_le = cpu_to_le16(vid); - crc = bitrev32(~xgbe_vid_crc32_le(vid_le)) >> 28; - - vlan_hash_table |= (1 << crc); - } - - /* Set the VLAN Hash Table filtering register */ - XGMAC_IOWRITE_BITS(pdata, MAC_VLANHTR, VLHT, vlan_hash_table); - - return 0; -} - static void xgbe_tx_desc_reset(struct xgbe_ring_data *rdata) { struct xgbe_ring_desc *rdesc = rdata->rdesc; @@ -1288,11 +1327,42 @@ static int xgbe_config_tstamp(struct xgbe_prv_data *pdata, return 0; } +static void xgbe_config_tc(struct xgbe_prv_data *pdata) +{ + unsigned int offset, queue, prio; + u8 i; + + netdev_reset_tc(pdata->netdev); + if (!pdata->num_tcs) + return; + + netdev_set_num_tc(pdata->netdev, pdata->num_tcs); + + for (i = 0, queue = 0, offset = 0; i < pdata->num_tcs; i++) { + while ((queue < pdata->tx_q_count) && + (pdata->q2tc_map[queue] == i)) + queue++; + + netif_dbg(pdata, drv, pdata->netdev, "TC%u using TXq%u-%u\n", + i, offset, queue - 1); + netdev_set_tc_queue(pdata->netdev, i, queue - offset, offset); + offset = queue; + } + + if (!pdata->ets) + return; + + for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++) + netdev_set_prio_tc_map(pdata->netdev, prio, + pdata->ets->prio_tc[prio]); +} + static void xgbe_config_dcb_tc(struct xgbe_prv_data *pdata) { struct ieee_ets *ets = pdata->ets; unsigned int total_weight, min_weight, weight; - unsigned int i; + unsigned int mask, reg, reg_val; + unsigned int i, prio; if (!ets) return; @@ -1309,6 +1379,25 @@ static void xgbe_config_dcb_tc(struct xgbe_prv_data *pdata) min_weight = 1; for (i = 0; i < pdata->hw_feat.tc_cnt; i++) { + /* Map the priorities to the traffic class */ + mask = 0; + for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++) { + if (ets->prio_tc[prio] == i) + mask |= (1 << prio); + } + mask &= 0xff; + + netif_dbg(pdata, drv, pdata->netdev, "TC%u PRIO mask=%#x\n", + i, mask); + reg = MTL_TCPM0R + (MTL_TCPM_INC * (i / MTL_TCPM_TC_PER_REG)); + reg_val = XGMAC_IOREAD(pdata, reg); + + reg_val &= ~(0xff << ((i % MTL_TCPM_TC_PER_REG) << 3)); + reg_val |= (mask << ((i % MTL_TCPM_TC_PER_REG) << 3)); + + XGMAC_IOWRITE(pdata, reg, reg_val); + + /* Set the traffic class algorithm */ switch (ets->tc_tsa[i]) { case IEEE_8021QAZ_TSA_STRICT: netif_dbg(pdata, drv, pdata->netdev, @@ -1329,38 +1418,12 @@ static void xgbe_config_dcb_tc(struct xgbe_prv_data *pdata) break; } } + + xgbe_config_tc(pdata); } static void xgbe_config_dcb_pfc(struct xgbe_prv_data *pdata) { - struct ieee_pfc *pfc = pdata->pfc; - struct ieee_ets *ets = pdata->ets; - unsigned int mask, reg, 
reg_val; - unsigned int tc, prio; - - if (!pfc || !ets) - return; - - for (tc = 0; tc < pdata->hw_feat.tc_cnt; tc++) { - mask = 0; - for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++) { - if ((pfc->pfc_en & (1 << prio)) && - (ets->prio_tc[prio] == tc)) - mask |= (1 << prio); - } - mask &= 0xff; - - netif_dbg(pdata, drv, pdata->netdev, "TC%u PFC mask=%#x\n", - tc, mask); - reg = MTL_TCPM0R + (MTL_TCPM_INC * (tc / MTL_TCPM_TC_PER_REG)); - reg_val = XGMAC_IOREAD(pdata, reg); - - reg_val &= ~(0xff << ((tc % MTL_TCPM_TC_PER_REG) << 3)); - reg_val |= (mask << ((tc % MTL_TCPM_TC_PER_REG) << 3)); - - XGMAC_IOWRITE(pdata, reg, reg_val); - } - xgbe_config_flow_control(pdata); } @@ -2595,6 +2658,32 @@ static void xgbe_disable_tx(struct xgbe_prv_data *pdata) } } +static void xgbe_prepare_rx_stop(struct xgbe_prv_data *pdata, + unsigned int queue) +{ + unsigned int rx_status; + unsigned long rx_timeout; + + /* The Rx engine cannot be stopped if it is actively processing + * packets. Wait for the Rx queue to empty the Rx fifo. Don't + * wait forever though... + */ + rx_timeout = jiffies + (XGBE_DMA_STOP_TIMEOUT * HZ); + while (time_before(jiffies, rx_timeout)) { + rx_status = XGMAC_MTL_IOREAD(pdata, queue, MTL_Q_RQDR); + if ((XGMAC_GET_BITS(rx_status, MTL_Q_RQDR, PRXQ) == 0) && + (XGMAC_GET_BITS(rx_status, MTL_Q_RQDR, RXQSTS) == 0)) + break; + + usleep_range(500, 1000); + } + + if (!time_before(jiffies, rx_timeout)) + netdev_info(pdata->netdev, + "timed out waiting for Rx queue %u to empty\n", + queue); +} + static void xgbe_enable_rx(struct xgbe_prv_data *pdata) { struct xgbe_channel *channel; @@ -2633,6 +2722,10 @@ static void xgbe_disable_rx(struct xgbe_prv_data *pdata) XGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 0); XGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 0); + /* Prepare for Rx DMA channel stop */ + for (i = 0; i < pdata->rx_q_count; i++) + xgbe_prepare_rx_stop(pdata, i); + /* Disable each Rx queue */ XGMAC_IOWRITE(pdata, MAC_RQC0R, 0); @@ -2881,6 +2974,7 @@ void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if) hw_if->get_tx_tstamp = xgbe_get_tx_tstamp; /* For Data Center Bridging config */ + hw_if->config_tc = xgbe_config_tc; hw_if->config_dcb_tc = xgbe_config_dcb_tc; hw_if->config_dcb_pfc = xgbe_config_dcb_pfc; diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index 8a9b493566c9..33606840ae15 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -6,7 +6,7 @@ * * License 1: GPLv2 * - * Copyright (c) 2014 Advanced Micro Devices, Inc. + * Copyright (c) 2014-2016 Advanced Micro Devices, Inc. * * This file is free software; you may copy, redistribute and/or modify * it under the terms of the GNU General Public License as published by @@ -56,7 +56,7 @@ * * License 2: Modified BSD * - * Copyright (c) 2014 Advanced Micro Devices, Inc. + * Copyright (c) 2014-2016 Advanced Micro Devices, Inc. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -356,7 +356,7 @@ static irqreturn_t xgbe_isr(int irq, void *data) xgbe_disable_rx_tx_ints(pdata); /* Turn on polling */ - __napi_schedule(&pdata->napi); + __napi_schedule_irqoff(&pdata->napi); } } @@ -409,7 +409,7 @@ static irqreturn_t xgbe_dma_isr(int irq, void *data) disable_irq_nosync(channel->dma_irq); /* Turn on polling */ - __napi_schedule(&channel->napi); + __napi_schedule_irqoff(&channel->napi); } return IRQ_HANDLED; @@ -1626,30 +1626,22 @@ static void xgbe_poll_controller(struct net_device *netdev) } #endif /* End CONFIG_NET_POLL_CONTROLLER */ -static int xgbe_setup_tc(struct net_device *netdev, u8 tc) +static int xgbe_setup_tc(struct net_device *netdev, u32 handle, __be16 proto, + struct tc_to_netdev *tc_to_netdev) { struct xgbe_prv_data *pdata = netdev_priv(netdev); - unsigned int offset, queue; - u8 i; + u8 tc; - if (tc && (tc != pdata->hw_feat.tc_cnt)) + if (handle != TC_H_ROOT || tc_to_netdev->type != TC_SETUP_MQPRIO) return -EINVAL; - if (tc) { - netdev_set_num_tc(netdev, tc); - for (i = 0, queue = 0, offset = 0; i < tc; i++) { - while ((queue < pdata->tx_q_count) && - (pdata->q2tc_map[queue] == i)) - queue++; - - netif_dbg(pdata, drv, netdev, "TC%u using TXq%u-%u\n", - i, offset, queue - 1); - netdev_set_tc_queue(netdev, i, queue - offset, offset); - offset = queue; - } - } else { - netdev_reset_tc(netdev); - } + tc = tc_to_netdev->tc; + + if (tc > pdata->hw_feat.tc_cnt) + return -EINVAL; + + pdata->num_tcs = tc; + pdata->hw_if.config_tc(pdata); return 0; } @@ -2062,7 +2054,7 @@ static int xgbe_one_poll(struct napi_struct *napi, int budget) /* If we processed everything, we are done */ if (processed < budget) { /* Turn off polling */ - napi_complete(napi); + napi_complete_done(napi, processed); /* Enable Tx and Rx interrupts */ enable_irq(channel->dma_irq); @@ -2104,7 +2096,7 @@ static int xgbe_all_poll(struct napi_struct *napi, int budget) /* If we processed everything, we are done */ if (processed < budget) { /* Turn off polling */ - napi_complete(napi); + napi_complete_done(napi, processed); /* Enable Tx and Rx interrupts */ xgbe_enable_rx_tx_ints(pdata); diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c index 6040293db9c1..11d9f0c5b78b 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c @@ -6,7 +6,7 @@ * * License 1: GPLv2 * - * Copyright (c) 2014 Advanced Micro Devices, Inc. + * Copyright (c) 2014-2016 Advanced Micro Devices, Inc. * * This file is free software; you may copy, redistribute and/or modify * it under the terms of the GNU General Public License as published by @@ -56,7 +56,7 @@ * * License 2: Modified BSD * - * Copyright (c) 2014 Advanced Micro Devices, Inc. + * Copyright (c) 2014-2016 Advanced Micro Devices, Inc. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -318,8 +318,20 @@ static int xgbe_set_settings(struct net_device *netdev, if (cmd->autoneg == AUTONEG_DISABLE) { switch (speed) { case SPEED_10000: + break; case SPEED_2500: + if (pdata->speed_set != XGBE_SPEEDSET_2500_10000) { + netdev_err(netdev, "unsupported speed %u\n", + speed); + return -EINVAL; + } + break; case SPEED_1000: + if (pdata->speed_set != XGBE_SPEEDSET_1000_10000) { + netdev_err(netdev, "unsupported speed %u\n", + speed); + return -EINVAL; + } break; default: netdev_err(netdev, "unsupported speed %u\n", speed); diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c index 618d952c2984..3eee3201b58f 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c @@ -6,7 +6,7 @@ * * License 1: GPLv2 * - * Copyright (c) 2014 Advanced Micro Devices, Inc. + * Copyright (c) 2014-2016 Advanced Micro Devices, Inc. * * This file is free software; you may copy, redistribute and/or modify * it under the terms of the GNU General Public License as published by @@ -56,7 +56,7 @@ * * License 2: Modified BSD * - * Copyright (c) 2014 Advanced Micro Devices, Inc. + * Copyright (c) 2014-2016 Advanced Micro Devices, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -363,7 +363,7 @@ static int xgbe_probe(struct platform_device *pdev) platform_set_drvdata(pdev, netdev); spin_lock_init(&pdata->lock); - mutex_init(&pdata->xpcs_mutex); + spin_lock_init(&pdata->xpcs_lock); mutex_init(&pdata->rss_mutex); spin_lock_init(&pdata->tstamp_lock); diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c index 446058081866..84c5d296d13e 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c @@ -6,7 +6,7 @@ * * License 1: GPLv2 * - * Copyright (c) 2014 Advanced Micro Devices, Inc. + * Copyright (c) 2014-2016 Advanced Micro Devices, Inc. * * This file is free software; you may copy, redistribute and/or modify * it under the terms of the GNU General Public License as published by @@ -56,7 +56,7 @@ * * License 2: Modified BSD * - * Copyright (c) 2014 Advanced Micro Devices, Inc. + * Copyright (c) 2014-2016 Advanced Micro Devices, Inc. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -626,10 +626,22 @@ static irqreturn_t xgbe_an_isr(int irq, void *data) netif_dbg(pdata, intr, pdata->netdev, "AN interrupt received\n"); - /* Interrupt reason must be read and cleared outside of IRQ context */ - disable_irq_nosync(pdata->an_irq); + /* Disable AN interrupts */ + XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK, 0); + + /* Save the interrupt(s) that fired */ + pdata->an_int = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_INT); - queue_work(pdata->an_workqueue, &pdata->an_irq_work); + if (pdata->an_int) { + /* Clear the interrupt(s) that fired and process them */ + XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, ~pdata->an_int); + + queue_work(pdata->an_workqueue, &pdata->an_irq_work); + } else { + /* Enable AN interrupts */ + XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK, + XGBE_AN_INT_MASK); + } return IRQ_HANDLED; } @@ -673,34 +685,26 @@ static void xgbe_an_state_machine(struct work_struct *work) struct xgbe_prv_data, an_work); enum xgbe_an cur_state = pdata->an_state; - unsigned int int_reg, int_mask; mutex_lock(&pdata->an_mutex); - /* Read the interrupt */ - int_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_INT); - if (!int_reg) + if (!pdata->an_int) goto out; next_int: - if (int_reg & XGBE_AN_PG_RCV) { + if (pdata->an_int & XGBE_AN_PG_RCV) { pdata->an_state = XGBE_AN_PAGE_RECEIVED; - int_mask = XGBE_AN_PG_RCV; - } else if (int_reg & XGBE_AN_INC_LINK) { + pdata->an_int &= ~XGBE_AN_PG_RCV; + } else if (pdata->an_int & XGBE_AN_INC_LINK) { pdata->an_state = XGBE_AN_INCOMPAT_LINK; - int_mask = XGBE_AN_INC_LINK; - } else if (int_reg & XGBE_AN_INT_CMPLT) { + pdata->an_int &= ~XGBE_AN_INC_LINK; + } else if (pdata->an_int & XGBE_AN_INT_CMPLT) { pdata->an_state = XGBE_AN_COMPLETE; - int_mask = XGBE_AN_INT_CMPLT; + pdata->an_int &= ~XGBE_AN_INT_CMPLT; } else { pdata->an_state = XGBE_AN_ERROR; - int_mask = 0; } - /* Clear the interrupt to be processed */ - int_reg &= ~int_mask; - XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, int_reg); - pdata->an_result = pdata->an_state; again: @@ -740,14 +744,14 @@ again: } if (pdata->an_state == XGBE_AN_NO_LINK) { - int_reg = 0; + pdata->an_int = 0; XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, 0); } else if (pdata->an_state == XGBE_AN_ERROR) { netdev_err(pdata->netdev, "error during auto-negotiation, state=%u\n", cur_state); - int_reg = 0; + pdata->an_int = 0; XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, 0); } @@ -765,11 +769,12 @@ again: if (cur_state != pdata->an_state) goto again; - if (int_reg) + if (pdata->an_int) goto next_int; out: - enable_irq(pdata->an_irq); + /* Enable AN interrupts on the way out */ + XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK, XGBE_AN_INT_MASK); mutex_unlock(&pdata->an_mutex); } diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h index e234b9970318..98d9d63c4353 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe.h @@ -6,7 +6,7 @@ * * License 1: GPLv2 * - * Copyright (c) 2014 Advanced Micro Devices, Inc. + * Copyright (c) 2014-2016 Advanced Micro Devices, Inc. * * This file is free software; you may copy, redistribute and/or modify * it under the terms of the GNU General Public License as published by @@ -56,7 +56,7 @@ * * License 2: Modified BSD * - * Copyright (c) 2014 Advanced Micro Devices, Inc. + * Copyright (c) 2014-2016 Advanced Micro Devices, Inc. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -673,6 +673,7 @@ struct xgbe_hw_if { u64 (*get_tx_tstamp)(struct xgbe_prv_data *); /* For Data Center Bridging config */ + void (*config_tc)(struct xgbe_prv_data *); void (*config_dcb_tc)(struct xgbe_prv_data *); void (*config_dcb_pfc)(struct xgbe_prv_data *); @@ -773,8 +774,8 @@ struct xgbe_prv_data { /* Overall device lock */ spinlock_t lock; - /* XPCS indirect addressing mutex */ - struct mutex xpcs_mutex; + /* XPCS indirect addressing lock */ + spinlock_t xpcs_lock; /* RSS addressing mutex */ struct mutex rss_mutex; @@ -880,6 +881,7 @@ struct xgbe_prv_data { struct ieee_pfc *pfc; unsigned int q2tc_map[XGBE_MAX_QUEUES]; unsigned int prio2q_map[IEEE_8021QAZ_MAX_TCS]; + u8 num_tcs; /* Hardware features of the device */ struct xgbe_hw_features hw_feat; @@ -925,6 +927,7 @@ struct xgbe_prv_data { u32 serdes_dfe_tap_ena[XGBE_SPEEDS]; /* Auto-negotiation state machine support */ + unsigned int an_int; struct mutex an_mutex; enum xgbe_an an_result; enum xgbe_an an_state; diff --git a/drivers/net/ethernet/apm/xgene/Makefile b/drivers/net/ethernet/apm/xgene/Makefile index 700b5abe5de5..f46321f68315 100644 --- a/drivers/net/ethernet/apm/xgene/Makefile +++ b/drivers/net/ethernet/apm/xgene/Makefile @@ -3,5 +3,6 @@ # xgene-enet-objs := xgene_enet_hw.o xgene_enet_sgmac.o xgene_enet_xgmac.o \ - xgene_enet_main.o xgene_enet_ring2.o xgene_enet_ethtool.o + xgene_enet_main.o xgene_enet_ring2.o xgene_enet_ethtool.o \ + xgene_enet_cle.o obj-$(CONFIG_NET_XGENE) += xgene-enet.o diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_cle.c b/drivers/net/ethernet/apm/xgene/xgene_enet_cle.c new file mode 100644 index 000000000000..b212488606da --- /dev/null +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_cle.c @@ -0,0 +1,734 @@ +/* Applied Micro X-Gene SoC Ethernet Classifier structures + * + * Copyright (c) 2016, Applied Micro Circuits Corporation + * Authors: Khuong Dinh <kdinh@apm.com> + * Tanmay Inamdar <tinamdar@apm.com> + * Iyappan Subramanian <isubramanian@apm.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ + +#include "xgene_enet_main.h" + +/* interfaces to convert structures to HW recognized bit formats */ +static void xgene_cle_sband_to_hw(u8 frag, enum xgene_cle_prot_version ver, + enum xgene_cle_prot_type type, u32 len, + u32 *reg) +{ + *reg = SET_VAL(SB_IPFRAG, frag) | + SET_VAL(SB_IPPROT, type) | + SET_VAL(SB_IPVER, ver) | + SET_VAL(SB_HDRLEN, len); +} + +static void xgene_cle_idt_to_hw(u32 dstqid, u32 fpsel, + u32 nfpsel, u32 *idt_reg) +{ + *idt_reg = SET_VAL(IDT_DSTQID, dstqid) | + SET_VAL(IDT_FPSEL, fpsel) | + SET_VAL(IDT_NFPSEL, nfpsel); +} + +static void xgene_cle_dbptr_to_hw(struct xgene_enet_pdata *pdata, + struct xgene_cle_dbptr *dbptr, u32 *buf) +{ + buf[4] = SET_VAL(CLE_FPSEL, dbptr->fpsel) | + SET_VAL(CLE_DSTQIDL, dbptr->dstqid); + + buf[5] = SET_VAL(CLE_DSTQIDH, (u32)dbptr->dstqid >> CLE_DSTQIDL_LEN) | + SET_VAL(CLE_PRIORITY, dbptr->cle_priority); +} + +static void xgene_cle_kn_to_hw(struct xgene_cle_ptree_kn *kn, u32 *buf) +{ + u32 i, j = 0; + u32 data; + + buf[j++] = SET_VAL(CLE_TYPE, kn->node_type); + for (i = 0; i < kn->num_keys; i++) { + struct xgene_cle_ptree_key *key = &kn->key[i]; + + if (!(i % 2)) { + buf[j] = SET_VAL(CLE_KN_PRIO, key->priority) | + SET_VAL(CLE_KN_RPTR, key->result_pointer); + } else { + data = SET_VAL(CLE_KN_PRIO, key->priority) | + SET_VAL(CLE_KN_RPTR, key->result_pointer); + buf[j++] |= (data << 16); + } + } +} + +static void xgene_cle_dn_to_hw(struct xgene_cle_ptree_ewdn *dn, + u32 *buf, u32 jb) +{ + struct xgene_cle_ptree_branch *br; + u32 i, j = 0; + u32 npp; + + buf[j++] = SET_VAL(CLE_DN_TYPE, dn->node_type) | + SET_VAL(CLE_DN_LASTN, dn->last_node) | + SET_VAL(CLE_DN_HLS, dn->hdr_len_store) | + SET_VAL(CLE_DN_EXT, dn->hdr_extn) | + SET_VAL(CLE_DN_BSTOR, dn->byte_store) | + SET_VAL(CLE_DN_SBSTOR, dn->search_byte_store) | + SET_VAL(CLE_DN_RPTR, dn->result_pointer); + + for (i = 0; i < dn->num_branches; i++) { + br = &dn->branch[i]; + npp = br->next_packet_pointer; + + if ((br->jump_rel == JMP_ABS) && (npp < CLE_PKTRAM_SIZE)) + npp += jb; + + buf[j++] = SET_VAL(CLE_BR_VALID, br->valid) | + SET_VAL(CLE_BR_NPPTR, npp) | + SET_VAL(CLE_BR_JB, br->jump_bw) | + SET_VAL(CLE_BR_JR, br->jump_rel) | + SET_VAL(CLE_BR_OP, br->operation) | + SET_VAL(CLE_BR_NNODE, br->next_node) | + SET_VAL(CLE_BR_NBR, br->next_branch); + + buf[j++] = SET_VAL(CLE_BR_DATA, br->data) | + SET_VAL(CLE_BR_MASK, br->mask); + } +} + +static int xgene_cle_poll_cmd_done(void __iomem *base, + enum xgene_cle_cmd_type cmd) +{ + u32 status, loop = 10; + int ret = -EBUSY; + + while (loop--) { + status = ioread32(base + INDCMD_STATUS); + if (status & cmd) { + ret = 0; + break; + } + usleep_range(1000, 2000); + } + + return ret; +} + +static int xgene_cle_dram_wr(struct xgene_enet_cle *cle, u32 *data, u8 nregs, + u32 index, enum xgene_cle_dram_type type, + enum xgene_cle_cmd_type cmd) +{ + enum xgene_cle_parser parser = cle->active_parser; + void __iomem *base = cle->base; + u32 i, j, ind_addr; + u8 port, nparsers; + int ret = 0; + + /* PTREE_RAM onwards, DRAM regions are common for all parsers */ + nparsers = (type >= PTREE_RAM) ? 
1 : cle->parsers; + + for (i = 0; i < nparsers; i++) { + port = i; + if ((type < PTREE_RAM) && (parser != PARSER_ALL)) + port = parser; + + ind_addr = XGENE_CLE_DRAM(type + (port * 4)) | index; + iowrite32(ind_addr, base + INDADDR); + for (j = 0; j < nregs; j++) + iowrite32(data[j], base + DATA_RAM0 + (j * 4)); + iowrite32(cmd, base + INDCMD); + + ret = xgene_cle_poll_cmd_done(base, cmd); + if (ret) + break; + } + + return ret; +} + +static void xgene_cle_enable_ptree(struct xgene_enet_pdata *pdata, + struct xgene_enet_cle *cle) +{ + struct xgene_cle_ptree *ptree = &cle->ptree; + void __iomem *addr, *base = cle->base; + u32 offset = CLE_PORT_OFFSET; + u32 i; + + /* 1G port has to advance 4 bytes and 10G has to advance 8 bytes */ + ptree->start_pkt += cle->jump_bytes; + for (i = 0; i < cle->parsers; i++) { + if (cle->active_parser != PARSER_ALL) + addr = base + cle->active_parser * offset; + else + addr = base + (i * offset); + + iowrite32(ptree->start_node & 0x3fff, addr + SNPTR0); + iowrite32(ptree->start_pkt & 0x1ff, addr + SPPTR0); + } +} + +static int xgene_cle_setup_dbptr(struct xgene_enet_pdata *pdata, + struct xgene_enet_cle *cle) +{ + struct xgene_cle_ptree *ptree = &cle->ptree; + u32 buf[CLE_DRAM_REGS]; + u32 i; + int ret; + + memset(buf, 0, sizeof(buf)); + for (i = 0; i < ptree->num_dbptr; i++) { + xgene_cle_dbptr_to_hw(pdata, &ptree->dbptr[i], buf); + ret = xgene_cle_dram_wr(cle, buf, 6, i + ptree->start_dbptr, + DB_RAM, CLE_CMD_WR); + if (ret) + return ret; + } + + return 0; +} + +static int xgene_cle_setup_node(struct xgene_enet_pdata *pdata, + struct xgene_enet_cle *cle) +{ + struct xgene_cle_ptree *ptree = &cle->ptree; + struct xgene_cle_ptree_ewdn *dn = ptree->dn; + struct xgene_cle_ptree_kn *kn = ptree->kn; + u32 buf[CLE_DRAM_REGS]; + int i, j, ret; + + memset(buf, 0, sizeof(buf)); + for (i = 0; i < ptree->num_dn; i++) { + xgene_cle_dn_to_hw(&dn[i], buf, cle->jump_bytes); + ret = xgene_cle_dram_wr(cle, buf, 17, i + ptree->start_node, + PTREE_RAM, CLE_CMD_WR); + if (ret) + return ret; + } + + /* continue node index for key node */ + memset(buf, 0, sizeof(buf)); + for (j = i; j < (ptree->num_kn + ptree->num_dn); j++) { + xgene_cle_kn_to_hw(&kn[j - ptree->num_dn], buf); + ret = xgene_cle_dram_wr(cle, buf, 17, j + ptree->start_node, + PTREE_RAM, CLE_CMD_WR); + if (ret) + return ret; + } + + return 0; +} + +static int xgene_cle_setup_ptree(struct xgene_enet_pdata *pdata, + struct xgene_enet_cle *cle) +{ + int ret; + + ret = xgene_cle_setup_node(pdata, cle); + if (ret) + return ret; + + ret = xgene_cle_setup_dbptr(pdata, cle); + if (ret) + return ret; + + xgene_cle_enable_ptree(pdata, cle); + + return 0; +} + +static void xgene_cle_setup_def_dbptr(struct xgene_enet_pdata *pdata, + struct xgene_enet_cle *enet_cle, + struct xgene_cle_dbptr *dbptr, + u32 index, u8 priority) +{ + void __iomem *base = enet_cle->base; + void __iomem *base_addr; + u32 buf[CLE_DRAM_REGS]; + u32 def_cls, offset; + u32 i, j; + + memset(buf, 0, sizeof(buf)); + xgene_cle_dbptr_to_hw(pdata, dbptr, buf); + + for (i = 0; i < enet_cle->parsers; i++) { + if (enet_cle->active_parser != PARSER_ALL) { + offset = enet_cle->active_parser * + CLE_PORT_OFFSET; + } else { + offset = i * CLE_PORT_OFFSET; + } + + base_addr = base + DFCLSRESDB00 + offset; + for (j = 0; j < 6; j++) + iowrite32(buf[j], base_addr + (j * 4)); + + def_cls = ((priority & 0x7) << 10) | (index & 0x3ff); + iowrite32(def_cls, base + DFCLSRESDBPTR0 + offset); + } +} + +static int xgene_cle_set_rss_sband(struct xgene_enet_cle *cle) +{ + u32 idx = 
CLE_PKTRAM_SIZE / sizeof(u32); + u32 mac_hdr_len = ETH_HLEN; + u32 sband, reg = 0; + u32 ipv4_ihl = 5; + u32 hdr_len; + int ret; + + /* Sideband: IPv4/TCP packets */ + hdr_len = (mac_hdr_len << 5) | ipv4_ihl; + xgene_cle_sband_to_hw(0, XGENE_CLE_IPV4, XGENE_CLE_TCP, hdr_len, &reg); + sband = reg; + + /* Sideband: IPv4/UDP packets */ + hdr_len = (mac_hdr_len << 5) | ipv4_ihl; + xgene_cle_sband_to_hw(1, XGENE_CLE_IPV4, XGENE_CLE_UDP, hdr_len, &reg); + sband |= (reg << 16); + + ret = xgene_cle_dram_wr(cle, &sband, 1, idx, PKT_RAM, CLE_CMD_WR); + if (ret) + return ret; + + /* Sideband: IPv4/RAW packets */ + hdr_len = (mac_hdr_len << 5) | ipv4_ihl; + xgene_cle_sband_to_hw(0, XGENE_CLE_IPV4, XGENE_CLE_OTHER, + hdr_len, &reg); + sband = reg; + + /* Sideband: Ethernet II/RAW packets */ + hdr_len = (mac_hdr_len << 5); + xgene_cle_sband_to_hw(0, XGENE_CLE_IPV4, XGENE_CLE_OTHER, + hdr_len, &reg); + sband |= (reg << 16); + + ret = xgene_cle_dram_wr(cle, &sband, 1, idx + 1, PKT_RAM, CLE_CMD_WR); + if (ret) + return ret; + + return 0; +} + +static int xgene_cle_set_rss_skeys(struct xgene_enet_cle *cle) +{ + u32 secret_key_ipv4[4]; /* 16 bytes */ + int ret = 0; + + get_random_bytes(secret_key_ipv4, 16); + ret = xgene_cle_dram_wr(cle, secret_key_ipv4, 4, 0, + RSS_IPV4_HASH_SKEY, CLE_CMD_WR); + return ret; +} + +static int xgene_cle_set_rss_idt(struct xgene_enet_pdata *pdata) +{ + u32 fpsel, dstqid, nfpsel, idt_reg, idx; + int i, ret = 0; + u16 pool_id; + + for (i = 0; i < XGENE_CLE_IDT_ENTRIES; i++) { + idx = i % pdata->rxq_cnt; + pool_id = pdata->rx_ring[idx]->buf_pool->id; + fpsel = xgene_enet_ring_bufnum(pool_id) - 0x20; + dstqid = xgene_enet_dst_ring_num(pdata->rx_ring[idx]); + nfpsel = 0; + idt_reg = 0; + + xgene_cle_idt_to_hw(dstqid, fpsel, nfpsel, &idt_reg); + ret = xgene_cle_dram_wr(&pdata->cle, &idt_reg, 1, i, + RSS_IDT, CLE_CMD_WR); + if (ret) + return ret; + } + + ret = xgene_cle_set_rss_skeys(&pdata->cle); + if (ret) + return ret; + + return 0; +} + +static int xgene_cle_setup_rss(struct xgene_enet_pdata *pdata) +{ + struct xgene_enet_cle *cle = &pdata->cle; + void __iomem *base = cle->base; + u32 offset, val = 0; + int i, ret = 0; + + offset = CLE_PORT_OFFSET; + for (i = 0; i < cle->parsers; i++) { + if (cle->active_parser != PARSER_ALL) + offset = cle->active_parser * CLE_PORT_OFFSET; + else + offset = i * CLE_PORT_OFFSET; + + /* enable RSS */ + val = (RSS_IPV4_12B << 1) | 0x1; + writel(val, base + RSS_CTRL0 + offset); + } + + /* setup sideband data */ + ret = xgene_cle_set_rss_sband(cle); + if (ret) + return ret; + + /* setup indirection table */ + ret = xgene_cle_set_rss_idt(pdata); + if (ret) + return ret; + + return 0; +} + +static int xgene_enet_cle_init(struct xgene_enet_pdata *pdata) +{ + struct xgene_enet_cle *enet_cle = &pdata->cle; + struct xgene_cle_dbptr dbptr[DB_MAX_PTRS]; + struct xgene_cle_ptree_branch *br; + u32 def_qid, def_fpsel, pool_id; + struct xgene_cle_ptree *ptree; + struct xgene_cle_ptree_kn kn; + int ret; + struct xgene_cle_ptree_ewdn ptree_dn[] = { + { + /* PKT_TYPE_NODE */ + .node_type = EWDN, + .last_node = 0, + .hdr_len_store = 1, + .hdr_extn = NO_BYTE, + .byte_store = NO_BYTE, + .search_byte_store = NO_BYTE, + .result_pointer = DB_RES_DROP, + .num_branches = 2, + .branch = { + { + /* IPV4 */ + .valid = 0, + .next_packet_pointer = 22, + .jump_bw = JMP_FW, + .jump_rel = JMP_ABS, + .operation = EQT, + .next_node = PKT_PROT_NODE, + .next_branch = 0, + .data = 0x8, + .mask = 0xffff + }, + { + .valid = 0, + .next_packet_pointer = 262, + .jump_bw = JMP_FW, + .jump_rel = JMP_ABS, +
.operation = EQT, + .next_node = LAST_NODE, + .next_branch = 0, + .data = 0x0, + .mask = 0xffff + } + }, + }, + { + /* PKT_PROT_NODE */ + .node_type = EWDN, + .last_node = 0, + .hdr_len_store = 1, + .hdr_extn = NO_BYTE, + .byte_store = NO_BYTE, + .search_byte_store = NO_BYTE, + .result_pointer = DB_RES_DROP, + .num_branches = 3, + .branch = { + { + /* TCP */ + .valid = 1, + .next_packet_pointer = 26, + .jump_bw = JMP_FW, + .jump_rel = JMP_ABS, + .operation = EQT, + .next_node = RSS_IPV4_TCP_NODE, + .next_branch = 0, + .data = 0x0600, + .mask = 0xffff + }, + { + /* UDP */ + .valid = 1, + .next_packet_pointer = 26, + .jump_bw = JMP_FW, + .jump_rel = JMP_ABS, + .operation = EQT, + .next_node = RSS_IPV4_UDP_NODE, + .next_branch = 0, + .data = 0x1100, + .mask = 0xffff + }, + { + .valid = 0, + .next_packet_pointer = 260, + .jump_bw = JMP_FW, + .jump_rel = JMP_ABS, + .operation = EQT, + .next_node = LAST_NODE, + .next_branch = 0, + .data = 0x0, + .mask = 0xffff + } + } + }, + { + /* RSS_IPV4_TCP_NODE */ + .node_type = EWDN, + .last_node = 0, + .hdr_len_store = 1, + .hdr_extn = NO_BYTE, + .byte_store = NO_BYTE, + .search_byte_store = BOTH_BYTES, + .result_pointer = DB_RES_DROP, + .num_branches = 6, + .branch = { + { + /* SRC IPV4 B01 */ + .valid = 0, + .next_packet_pointer = 28, + .jump_bw = JMP_FW, + .jump_rel = JMP_ABS, + .operation = EQT, + .next_node = RSS_IPV4_TCP_NODE, + .next_branch = 1, + .data = 0x0, + .mask = 0xffff + }, + { + /* SRC IPV4 B23 */ + .valid = 0, + .next_packet_pointer = 30, + .jump_bw = JMP_FW, + .jump_rel = JMP_ABS, + .operation = EQT, + .next_node = RSS_IPV4_TCP_NODE, + .next_branch = 2, + .data = 0x0, + .mask = 0xffff + }, + { + /* DST IPV4 B01 */ + .valid = 0, + .next_packet_pointer = 32, + .jump_bw = JMP_FW, + .jump_rel = JMP_ABS, + .operation = EQT, + .next_node = RSS_IPV4_TCP_NODE, + .next_branch = 3, + .data = 0x0, + .mask = 0xffff + }, + { + /* DST IPV4 B23 */ + .valid = 0, + .next_packet_pointer = 34, + .jump_bw = JMP_FW, + .jump_rel = JMP_ABS, + .operation = EQT, + .next_node = RSS_IPV4_TCP_NODE, + .next_branch = 4, + .data = 0x0, + .mask = 0xffff + }, + { + /* TCP SRC Port */ + .valid = 0, + .next_packet_pointer = 36, + .jump_bw = JMP_FW, + .jump_rel = JMP_ABS, + .operation = EQT, + .next_node = RSS_IPV4_TCP_NODE, + .next_branch = 5, + .data = 0x0, + .mask = 0xffff + }, + { + /* TCP DST Port */ + .valid = 0, + .next_packet_pointer = 256, + .jump_bw = JMP_FW, + .jump_rel = JMP_ABS, + .operation = EQT, + .next_node = LAST_NODE, + .next_branch = 0, + .data = 0x0, + .mask = 0xffff + } + } + }, + { + /* RSS_IPV4_UDP_NODE */ + .node_type = EWDN, + .last_node = 0, + .hdr_len_store = 1, + .hdr_extn = NO_BYTE, + .byte_store = NO_BYTE, + .search_byte_store = BOTH_BYTES, + .result_pointer = DB_RES_DROP, + .num_branches = 6, + .branch = { + { + /* SRC IPV4 B01 */ + .valid = 0, + .next_packet_pointer = 28, + .jump_bw = JMP_FW, + .jump_rel = JMP_ABS, + .operation = EQT, + .next_node = RSS_IPV4_UDP_NODE, + .next_branch = 1, + .data = 0x0, + .mask = 0xffff + }, + { + /* SRC IPV4 B23 */ + .valid = 0, + .next_packet_pointer = 30, + .jump_bw = JMP_FW, + .jump_rel = JMP_ABS, + .operation = EQT, + .next_node = RSS_IPV4_UDP_NODE, + .next_branch = 2, + .data = 0x0, + .mask = 0xffff + }, + { + /* DST IPV4 B01 */ + .valid = 0, + .next_packet_pointer = 32, + .jump_bw = JMP_FW, + .jump_rel = JMP_ABS, + .operation = EQT, + .next_node = RSS_IPV4_UDP_NODE, + .next_branch = 3, + .data = 0x0, + .mask = 0xffff + }, + { + /* DST IPV4 B23 */ + .valid = 0, + .next_packet_pointer = 34, + .jump_bw = 
JMP_FW, + .jump_rel = JMP_ABS, + .operation = EQT, + .next_node = RSS_IPV4_UDP_NODE, + .next_branch = 4, + .data = 0x0, + .mask = 0xffff + }, + { + /* UDP SRC Port */ + .valid = 0, + .next_packet_pointer = 36, + .jump_bw = JMP_FW, + .jump_rel = JMP_ABS, + .operation = EQT, + .next_node = RSS_IPV4_UDP_NODE, + .next_branch = 5, + .data = 0x0, + .mask = 0xffff + }, + { + /* UDP DST Port */ + .valid = 0, + .next_packet_pointer = 256, + .jump_bw = JMP_FW, + .jump_rel = JMP_ABS, + .operation = EQT, + .next_node = LAST_NODE, + .next_branch = 0, + .data = 0x0, + .mask = 0xffff + } + } + }, + { + /* LAST NODE */ + .node_type = EWDN, + .last_node = 1, + .hdr_len_store = 1, + .hdr_extn = NO_BYTE, + .byte_store = NO_BYTE, + .search_byte_store = NO_BYTE, + .result_pointer = DB_RES_DROP, + .num_branches = 1, + .branch = { + { + .valid = 0, + .next_packet_pointer = 0, + .jump_bw = JMP_FW, + .jump_rel = JMP_ABS, + .operation = EQT, + .next_node = MAX_NODES, + .next_branch = 0, + .data = 0, + .mask = 0xffff + } + } + } + }; + + ptree = &enet_cle->ptree; + ptree->start_pkt = 12; /* Ethertype */ + if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) { + ret = xgene_cle_setup_rss(pdata); + if (ret) { + netdev_err(pdata->ndev, "RSS initialization failed\n"); + return ret; + } + } else { + br = &ptree_dn[PKT_PROT_NODE].branch[0]; + br->valid = 0; + br->next_packet_pointer = 260; + br->next_node = LAST_NODE; + br->data = 0x0000; + br->mask = 0xffff; + } + + def_qid = xgene_enet_dst_ring_num(pdata->rx_ring[0]); + pool_id = pdata->rx_ring[0]->buf_pool->id; + def_fpsel = xgene_enet_ring_bufnum(pool_id) - 0x20; + + memset(dbptr, 0, sizeof(struct xgene_cle_dbptr) * DB_MAX_PTRS); + dbptr[DB_RES_ACCEPT].fpsel = def_fpsel; + dbptr[DB_RES_ACCEPT].dstqid = def_qid; + dbptr[DB_RES_ACCEPT].cle_priority = 1; + + dbptr[DB_RES_DEF].fpsel = def_fpsel; + dbptr[DB_RES_DEF].dstqid = def_qid; + dbptr[DB_RES_DEF].cle_priority = 7; + xgene_cle_setup_def_dbptr(pdata, enet_cle, &dbptr[DB_RES_DEF], + DB_RES_ACCEPT, 7); + + dbptr[DB_RES_DROP].drop = 1; + + memset(&kn, 0, sizeof(kn)); + kn.node_type = KN; + kn.num_keys = 1; + kn.key[0].priority = 0; + kn.key[0].result_pointer = DB_RES_ACCEPT; + + ptree->dn = ptree_dn; + ptree->kn = &kn; + ptree->dbptr = dbptr; + ptree->num_dn = MAX_NODES; + ptree->num_kn = 1; + ptree->num_dbptr = DB_MAX_PTRS; + + return xgene_cle_setup_ptree(pdata, enet_cle); +} + +struct xgene_cle_ops xgene_cle3in_ops = { + .cle_init = xgene_enet_cle_init, +}; diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_cle.h b/drivers/net/ethernet/apm/xgene/xgene_enet_cle.h new file mode 100644 index 000000000000..29a17abdd828 --- /dev/null +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_cle.h @@ -0,0 +1,295 @@ +/* Applied Micro X-Gene SoC Ethernet Classifier structures + * + * Copyright (c) 2016, Applied Micro Circuits Corporation + * Authors: Khuong Dinh <kdinh@apm.com> + * Tanmay Inamdar <tinamdar@apm.com> + * Iyappan Subramanian <isubramanian@apm.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details.
+ * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#ifndef __XGENE_ENET_CLE_H__ +#define __XGENE_ENET_CLE_H__ + +#include <linux/io.h> +#include <linux/random.h> + +/* Register offsets */ +#define INDADDR 0x04 +#define INDCMD 0x08 +#define INDCMD_STATUS 0x0c +#define DATA_RAM0 0x10 +#define SNPTR0 0x0100 +#define SPPTR0 0x0104 +#define DFCLSRESDBPTR0 0x0108 +#define DFCLSRESDB00 0x010c +#define RSS_CTRL0 0x0000013c + +#define CLE_CMD_TO 10 /* ms */ +#define CLE_PKTRAM_SIZE 256 /* bytes */ +#define CLE_PORT_OFFSET 0x200 +#define CLE_DRAM_REGS 17 + +#define CLE_DN_TYPE_LEN 2 +#define CLE_DN_TYPE_POS 0 +#define CLE_DN_LASTN_LEN 1 +#define CLE_DN_LASTN_POS 2 +#define CLE_DN_HLS_LEN 1 +#define CLE_DN_HLS_POS 3 +#define CLE_DN_EXT_LEN 2 +#define CLE_DN_EXT_POS 4 +#define CLE_DN_BSTOR_LEN 2 +#define CLE_DN_BSTOR_POS 6 +#define CLE_DN_SBSTOR_LEN 2 +#define CLE_DN_SBSTOR_POS 8 +#define CLE_DN_RPTR_LEN 12 +#define CLE_DN_RPTR_POS 12 + +#define CLE_BR_VALID_LEN 1 +#define CLE_BR_VALID_POS 0 +#define CLE_BR_NPPTR_LEN 9 +#define CLE_BR_NPPTR_POS 1 +#define CLE_BR_JB_LEN 1 +#define CLE_BR_JB_POS 10 +#define CLE_BR_JR_LEN 1 +#define CLE_BR_JR_POS 11 +#define CLE_BR_OP_LEN 3 +#define CLE_BR_OP_POS 12 +#define CLE_BR_NNODE_LEN 9 +#define CLE_BR_NNODE_POS 15 +#define CLE_BR_NBR_LEN 5 +#define CLE_BR_NBR_POS 24 + +#define CLE_BR_DATA_LEN 16 +#define CLE_BR_DATA_POS 0 +#define CLE_BR_MASK_LEN 16 +#define CLE_BR_MASK_POS 16 + +#define CLE_KN_PRIO_POS 0 +#define CLE_KN_PRIO_LEN 3 +#define CLE_KN_RPTR_POS 3 +#define CLE_KN_RPTR_LEN 10 +#define CLE_TYPE_POS 0 +#define CLE_TYPE_LEN 2 + +#define CLE_DSTQIDL_POS 25 +#define CLE_DSTQIDL_LEN 7 +#define CLE_DSTQIDH_POS 0 +#define CLE_DSTQIDH_LEN 5 +#define CLE_FPSEL_POS 21 +#define CLE_FPSEL_LEN 4 +#define CLE_PRIORITY_POS 5 +#define CLE_PRIORITY_LEN 3 + +#define JMP_ABS 0 +#define JMP_REL 1 +#define JMP_FW 0 +#define JMP_BW 1 + +enum xgene_cle_ptree_nodes { + PKT_TYPE_NODE, + PKT_PROT_NODE, + RSS_IPV4_TCP_NODE, + RSS_IPV4_UDP_NODE, + LAST_NODE, + MAX_NODES +}; + +enum xgene_cle_byte_store { + NO_BYTE, + FIRST_BYTE, + SECOND_BYTE, + BOTH_BYTES +}; + +/* Preclassification node types */ +enum xgene_cle_node_type { + INV, + KN, + EWDN, + RES_NODE +}; + +/* Preclassification operation types */ +enum xgene_cle_op_type { + EQT, + NEQT, + LTEQT, + GTEQT, + AND, + NAND +}; + +enum xgene_cle_parser { + PARSER0, + PARSER1, + PARSER2, + PARSER_ALL +}; + +#define XGENE_CLE_DRAM(type) (((type) & 0xf) << 28) +enum xgene_cle_dram_type { + PKT_RAM, + RSS_IDT, + RSS_IPV4_HASH_SKEY, + PTREE_RAM = 0xc, + AVL_RAM, + DB_RAM +}; + +enum xgene_cle_cmd_type { + CLE_CMD_WR = 1, + CLE_CMD_RD = 2, + CLE_CMD_AVL_ADD = 8, + CLE_CMD_AVL_DEL = 16, + CLE_CMD_AVL_SRCH = 32 +}; + +enum xgene_cle_ipv4_rss_hashtype { + RSS_IPV4_8B, + RSS_IPV4_12B, +}; + +enum xgene_cle_prot_type { + XGENE_CLE_TCP, + XGENE_CLE_UDP, + XGENE_CLE_ESP, + XGENE_CLE_OTHER +}; + +enum xgene_cle_prot_version { + XGENE_CLE_IPV4, +}; + +enum xgene_cle_ptree_dbptrs { + DB_RES_DROP, + DB_RES_DEF, + DB_RES_ACCEPT, + DB_MAX_PTRS +}; + +/* RSS sideband signal info */ +#define SB_IPFRAG_POS 0 +#define SB_IPFRAG_LEN 1 +#define SB_IPPROT_POS 1 +#define SB_IPPROT_LEN 2 +#define SB_IPVER_POS 3 +#define SB_IPVER_LEN 1 +#define SB_HDRLEN_POS 4 +#define SB_HDRLEN_LEN 12 + +/* RSS indirection table */ +#define XGENE_CLE_IDT_ENTRIES 128 +#define IDT_DSTQID_POS 0 +#define IDT_DSTQID_LEN 12 +#define IDT_FPSEL_POS 12 +#define IDT_FPSEL_LEN 4
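[Editor's note] The *_POS/*_LEN pairs in this header (the IDT_* group continues just below) are consumed by the driver's SET_VAL() packing helper, which is defined in xgene_enet_hw.h and is not part of this hunk. A minimal sketch of such a helper, assuming the kernel's GENMASK() from <linux/bitops.h>; the XF_* names are hypothetical, shown only to make the field layout concrete:

	/* mask covering bits [POS + LEN - 1 : POS] of field f */
	#define XF_MASK(f)	GENMASK((f ## _POS) + (f ## _LEN) - 1, (f ## _POS))
	/* shift a value into position and clip it to the field width */
	#define XF_SET(f, v)	(((u32)(v) << (f ## _POS)) & XF_MASK(f))

With the IDT layout defined here, XF_SET(IDT_DSTQID, qid) | XF_SET(IDT_FPSEL, fp) packs the 12-bit destination queue into bits 11:0 and the 4-bit free-pool select into bits 15:12, the same word that xgene_cle_idt_to_hw() builds.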
+#define IDT_NFPSEL_POS 16 +#define IDT_NFPSEL_LEN 4 + +struct xgene_cle_ptree_branch { + bool valid; + u16 next_packet_pointer; + bool jump_bw; + bool jump_rel; + u8 operation; + u16 next_node; + u8 next_branch; + u16 data; + u16 mask; +}; + +struct xgene_cle_ptree_ewdn { + u8 node_type; + bool last_node; + bool hdr_len_store; + u8 hdr_extn; + u8 byte_store; + u8 search_byte_store; + u16 result_pointer; + u8 num_branches; + struct xgene_cle_ptree_branch branch[6]; +}; + +struct xgene_cle_ptree_key { + u8 priority; + u16 result_pointer; +}; + +struct xgene_cle_ptree_kn { + u8 node_type; + u8 num_keys; + struct xgene_cle_ptree_key key[32]; +}; + +struct xgene_cle_dbptr { + u8 split_boundary; + u8 mirror_nxtfpsel; + u8 mirror_fpsel; + u16 mirror_dstqid; + u8 drop; + u8 mirror; + u8 hdr_data_split; + u64 hopinfomsbs; + u8 DR; + u8 HR; + u64 hopinfomlsbs; + u16 h0enq_num; + u8 h0fpsel; + u8 nxtfpsel; + u8 fpsel; + u16 dstqid; + u8 cle_priority; + u8 cle_flowgroup; + u8 cle_perflow; + u8 cle_insert_timestamp; + u8 stash; + u8 in; + u8 perprioen; + u8 perflowgroupen; + u8 perflowen; + u8 selhash; + u8 selhdrext; + u8 mirror_nxtfpsel_msb; + u8 mirror_fpsel_msb; + u8 hfpsel_msb; + u8 nxtfpsel_msb; + u8 fpsel_msb; +}; + +struct xgene_cle_ptree { + struct xgene_cle_ptree_ewdn *dn; + struct xgene_cle_ptree_kn *kn; + struct xgene_cle_dbptr *dbptr; + u32 num_dn; + u32 num_kn; + u32 num_dbptr; + u32 start_node; + u32 start_pkt; + u32 start_dbptr; +}; + +struct xgene_enet_cle { + void __iomem *base; + struct xgene_cle_ptree ptree; + enum xgene_cle_parser active_parser; + u32 parsers; + u32 max_nodes; + u32 max_dbptrs; + u32 jump_bytes; +}; + +extern struct xgene_cle_ops xgene_cle3in_ops; + +#endif /* __XGENE_ENET_CLE_H__ */ diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c index db55c9f6e8e1..39e081a70f5b 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c @@ -204,6 +204,17 @@ static u32 xgene_enet_ring_len(struct xgene_enet_desc_ring *ring) return num_msgs; } +static void xgene_enet_setup_coalescing(struct xgene_enet_desc_ring *ring) +{ + u32 data = 0x7777; + + xgene_enet_ring_wr32(ring, CSR_PBM_COAL, 0x8e); + xgene_enet_ring_wr32(ring, CSR_PBM_CTICK1, data); + xgene_enet_ring_wr32(ring, CSR_PBM_CTICK2, data << 16); + xgene_enet_ring_wr32(ring, CSR_THRESHOLD0_SET1, 0x40); + xgene_enet_ring_wr32(ring, CSR_THRESHOLD1_SET1, 0x80); +} + void xgene_enet_parse_error(struct xgene_enet_desc_ring *ring, struct xgene_enet_pdata *pdata, enum xgene_enet_err_code status) @@ -892,4 +903,5 @@ struct xgene_ring_ops xgene_ring1_ops = { .clear = xgene_enet_clear_ring, .wr_cmd = xgene_enet_wr_cmd, .len = xgene_enet_ring_len, + .coalesce = xgene_enet_setup_coalescing, }; diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h index 8a9091039ab4..ba7da98af2ef 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h @@ -54,6 +54,11 @@ enum xgene_enet_rm { #define IS_BUFFER_POOL BIT(20) #define PREFETCH_BUF_EN BIT(21) #define CSR_RING_ID_BUF 0x000c +#define CSR_PBM_COAL 0x0014 +#define CSR_PBM_CTICK1 0x001c +#define CSR_PBM_CTICK2 0x0020 +#define CSR_THRESHOLD0_SET1 0x0030 +#define CSR_THRESHOLD1_SET1 0x0034 #define CSR_RING_NE_INT_MODE 0x017c #define CSR_RING_CONFIG 0x006c #define CSR_RING_WR_BASE 0x0070 @@ -101,6 +106,7 @@ enum xgene_enet_rm { #define MAC_OFFSET 0x30 #define BLOCK_ETH_CSR_OFFSET 
0x2000 +#define BLOCK_ETH_CLE_CSR_OFFSET 0x6000 #define BLOCK_ETH_RING_IF_OFFSET 0x9000 #define BLOCK_ETH_CLKRST_CSR_OFFSET 0xc000 #define BLOCK_ETH_DIAG_CSR_OFFSET 0xD000 diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c index a4799c1fc7d4..8d4c1ad2fc60 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c @@ -93,13 +93,6 @@ static int xgene_enet_refill_bufpool(struct xgene_enet_desc_ring *buf_pool, return 0; } -static u16 xgene_enet_dst_ring_num(struct xgene_enet_desc_ring *ring) -{ - struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev); - - return ((u16)pdata->rm << 10) | ring->num; -} - static u8 xgene_enet_hdr_len(const void *data) { const struct ethhdr *eth = data; @@ -189,7 +182,6 @@ static int xgene_enet_tx_completion(struct xgene_enet_desc_ring *cp_ring, static u64 xgene_enet_work_msg(struct sk_buff *skb) { struct net_device *ndev = skb->dev; - struct xgene_enet_pdata *pdata = netdev_priv(ndev); struct iphdr *iph; u8 l3hlen = 0, l4hlen = 0; u8 ethhdr, proto = 0, csum_enable = 0; @@ -235,10 +227,6 @@ static u64 xgene_enet_work_msg(struct sk_buff *skb) if (!mss || ((skb->len - hdr_len) <= mss)) goto out; - if (mss != pdata->mss) { - pdata->mss = mss; - pdata->mac_ops->set_mss(pdata); - } hopinfo |= SET_BIT(ET); } } else if (iph->protocol == IPPROTO_UDP) { @@ -420,7 +408,7 @@ out: raw_desc->m0 = cpu_to_le64(SET_VAL(LL, ll) | SET_VAL(NV, nv) | SET_VAL(USERINFO, tx_ring->tail)); tx_ring->cp_ring->cp_skb[tx_ring->tail] = skb; - pdata->tx_level += count; + pdata->tx_level[tx_ring->cp_ring->index] += count; tx_ring->tail = tail; return count; @@ -430,15 +418,17 @@ static netdev_tx_t xgene_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev) { struct xgene_enet_pdata *pdata = netdev_priv(ndev); - struct xgene_enet_desc_ring *tx_ring = pdata->tx_ring; - u32 tx_level = pdata->tx_level; + struct xgene_enet_desc_ring *tx_ring; + int index = skb->queue_mapping; + u32 tx_level = pdata->tx_level[index]; int count; - if (tx_level < pdata->txc_level) - tx_level += ((typeof(pdata->tx_level))~0U); + tx_ring = pdata->tx_ring[index]; + if (tx_level < pdata->txc_level[index]) + tx_level += ((typeof(pdata->tx_level[index]))~0U); - if ((tx_level - pdata->txc_level) > pdata->tx_qcnt_hi) { - netif_stop_queue(ndev); + if ((tx_level - pdata->txc_level[index]) > pdata->tx_qcnt_hi) { + netif_stop_subqueue(ndev, index); return NETDEV_TX_BUSY; } @@ -536,7 +526,8 @@ static bool is_rx_desc(struct xgene_enet_raw_desc *raw_desc) static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring, int budget) { - struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev); + struct net_device *ndev = ring->ndev; + struct xgene_enet_pdata *pdata = netdev_priv(ndev); struct xgene_enet_raw_desc *raw_desc, *exp_desc; u16 head = ring->head; u16 slots = ring->slots - 1; @@ -580,7 +571,7 @@ static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring, desc_count++; processed++; if (is_completion) - pdata->txc_level += desc_count; + pdata->txc_level[ring->index] += desc_count; if (ret) break; @@ -590,8 +581,8 @@ static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring, pdata->ring_ops->wr_cmd(ring, -count); ring->head = head; - if (netif_queue_stopped(ring->ndev)) - netif_start_queue(ring->ndev); + if (__netif_subqueue_stopped(ndev, ring->index)) + netif_start_subqueue(ndev, ring->index); } return processed; @@ -616,8 +607,16 @@ static int xgene_enet_napi(struct 
napi_struct *napi, const int budget) static void xgene_enet_timeout(struct net_device *ndev) { struct xgene_enet_pdata *pdata = netdev_priv(ndev); + struct netdev_queue *txq; + int i; pdata->mac_ops->reset(pdata); + + for (i = 0; i < pdata->txq_cnt; i++) { + txq = netdev_get_tx_queue(ndev, i); + txq->trans_start = jiffies; + netif_tx_start_queue(txq); + } } static int xgene_enet_register_irq(struct net_device *ndev) @@ -625,16 +624,22 @@ static int xgene_enet_register_irq(struct net_device *ndev) struct xgene_enet_pdata *pdata = netdev_priv(ndev); struct device *dev = ndev_to_dev(ndev); struct xgene_enet_desc_ring *ring; - int ret; + int ret = 0, i; - ring = pdata->rx_ring; - ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq, - IRQF_SHARED, ring->irq_name, ring); - if (ret) - netdev_err(ndev, "Failed to request irq %s\n", ring->irq_name); + for (i = 0; i < pdata->rxq_cnt; i++) { + ring = pdata->rx_ring[i]; + irq_set_status_flags(ring->irq, IRQ_DISABLE_UNLAZY); + ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq, + IRQF_SHARED, ring->irq_name, ring); + if (ret) { + netdev_err(ndev, "Failed to request irq %s\n", + ring->irq_name); + } + } - if (pdata->cq_cnt) { - ring = pdata->tx_ring->cp_ring; + for (i = 0; i < pdata->cq_cnt; i++) { + ring = pdata->tx_ring[i]->cp_ring; + irq_set_status_flags(ring->irq, IRQ_DISABLE_UNLAZY); ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq, IRQF_SHARED, ring->irq_name, ring); if (ret) { @@ -649,27 +654,38 @@ static int xgene_enet_register_irq(struct net_device *ndev) static void xgene_enet_free_irq(struct net_device *ndev) { struct xgene_enet_pdata *pdata; + struct xgene_enet_desc_ring *ring; struct device *dev; + int i; pdata = netdev_priv(ndev); dev = ndev_to_dev(ndev); - devm_free_irq(dev, pdata->rx_ring->irq, pdata->rx_ring); - if (pdata->cq_cnt) { - devm_free_irq(dev, pdata->tx_ring->cp_ring->irq, - pdata->tx_ring->cp_ring); + for (i = 0; i < pdata->rxq_cnt; i++) { + ring = pdata->rx_ring[i]; + irq_clear_status_flags(ring->irq, IRQ_DISABLE_UNLAZY); + devm_free_irq(dev, ring->irq, ring); + } + + for (i = 0; i < pdata->cq_cnt; i++) { + ring = pdata->tx_ring[i]->cp_ring; + irq_clear_status_flags(ring->irq, IRQ_DISABLE_UNLAZY); + devm_free_irq(dev, ring->irq, ring); } } static void xgene_enet_napi_enable(struct xgene_enet_pdata *pdata) { struct napi_struct *napi; + int i; - napi = &pdata->rx_ring->napi; - napi_enable(napi); + for (i = 0; i < pdata->rxq_cnt; i++) { + napi = &pdata->rx_ring[i]->napi; + napi_enable(napi); + } - if (pdata->cq_cnt) { - napi = &pdata->tx_ring->cp_ring->napi; + for (i = 0; i < pdata->cq_cnt; i++) { + napi = &pdata->tx_ring[i]->cp_ring->napi; napi_enable(napi); } } @@ -677,12 +693,15 @@ static void xgene_enet_napi_enable(struct xgene_enet_pdata *pdata) static void xgene_enet_napi_disable(struct xgene_enet_pdata *pdata) { struct napi_struct *napi; + int i; - napi = &pdata->rx_ring->napi; - napi_disable(napi); + for (i = 0; i < pdata->rxq_cnt; i++) { + napi = &pdata->rx_ring[i]->napi; + napi_disable(napi); + } - if (pdata->cq_cnt) { - napi = &pdata->tx_ring->cp_ring->napi; + for (i = 0; i < pdata->cq_cnt; i++) { + napi = &pdata->tx_ring[i]->cp_ring->napi; napi_disable(napi); } } @@ -693,6 +712,14 @@ static int xgene_enet_open(struct net_device *ndev) const struct xgene_mac_ops *mac_ops = pdata->mac_ops; int ret; + ret = netif_set_real_num_tx_queues(ndev, pdata->txq_cnt); + if (ret) + return ret; + + ret = netif_set_real_num_rx_queues(ndev, pdata->rxq_cnt); + if (ret) + return ret; + mac_ops->tx_enable(pdata); 
mac_ops->rx_enable(pdata); @@ -715,6 +742,7 @@ static int xgene_enet_close(struct net_device *ndev) { struct xgene_enet_pdata *pdata = netdev_priv(ndev); const struct xgene_mac_ops *mac_ops = pdata->mac_ops; + int i; netif_stop_queue(ndev); @@ -728,7 +756,8 @@ static int xgene_enet_close(struct net_device *ndev) xgene_enet_free_irq(ndev); xgene_enet_napi_disable(pdata); - xgene_enet_process_ring(pdata->rx_ring, -1); + for (i = 0; i < pdata->rxq_cnt; i++) + xgene_enet_process_ring(pdata->rx_ring[i], -1); return 0; } @@ -748,18 +777,26 @@ static void xgene_enet_delete_ring(struct xgene_enet_desc_ring *ring) static void xgene_enet_delete_desc_rings(struct xgene_enet_pdata *pdata) { struct xgene_enet_desc_ring *buf_pool; + struct xgene_enet_desc_ring *ring; + int i; - if (pdata->tx_ring) { - xgene_enet_delete_ring(pdata->tx_ring); - pdata->tx_ring = NULL; + for (i = 0; i < pdata->txq_cnt; i++) { + ring = pdata->tx_ring[i]; + if (ring) { + xgene_enet_delete_ring(ring); + pdata->tx_ring[i] = NULL; + } } - if (pdata->rx_ring) { - buf_pool = pdata->rx_ring->buf_pool; - xgene_enet_delete_bufpool(buf_pool); - xgene_enet_delete_ring(buf_pool); - xgene_enet_delete_ring(pdata->rx_ring); - pdata->rx_ring = NULL; + for (i = 0; i < pdata->rxq_cnt; i++) { + ring = pdata->rx_ring[i]; + if (ring) { + buf_pool = ring->buf_pool; + xgene_enet_delete_bufpool(buf_pool); + xgene_enet_delete_ring(buf_pool); + xgene_enet_delete_ring(ring); + pdata->rx_ring[i] = NULL; + } } } @@ -814,24 +851,29 @@ static void xgene_enet_free_desc_rings(struct xgene_enet_pdata *pdata) { struct device *dev = &pdata->pdev->dev; struct xgene_enet_desc_ring *ring; + int i; - ring = pdata->tx_ring; - if (ring) { - if (ring->cp_ring && ring->cp_ring->cp_skb) - devm_kfree(dev, ring->cp_ring->cp_skb); - if (ring->cp_ring && pdata->cq_cnt) - xgene_enet_free_desc_ring(ring->cp_ring); - xgene_enet_free_desc_ring(ring); - } - - ring = pdata->rx_ring; - if (ring) { - if (ring->buf_pool) { - if (ring->buf_pool->rx_skb) - devm_kfree(dev, ring->buf_pool->rx_skb); - xgene_enet_free_desc_ring(ring->buf_pool); + for (i = 0; i < pdata->txq_cnt; i++) { + ring = pdata->tx_ring[i]; + if (ring) { + if (ring->cp_ring && ring->cp_ring->cp_skb) + devm_kfree(dev, ring->cp_ring->cp_skb); + if (ring->cp_ring && pdata->cq_cnt) + xgene_enet_free_desc_ring(ring->cp_ring); + xgene_enet_free_desc_ring(ring); + } + } + + for (i = 0; i < pdata->rxq_cnt; i++) { + ring = pdata->rx_ring[i]; + if (ring) { + if (ring->buf_pool) { + if (ring->buf_pool->rx_skb) + devm_kfree(dev, ring->buf_pool->rx_skb); + xgene_enet_free_desc_ring(ring->buf_pool); + } + xgene_enet_free_desc_ring(ring); } - xgene_enet_free_desc_ring(ring); } } @@ -944,104 +986,120 @@ static int xgene_enet_create_desc_rings(struct net_device *ndev) u8 bp_bufnum = pdata->bp_bufnum; u16 ring_num = pdata->ring_num; u16 ring_id; - int ret, size; - - /* allocate rx descriptor ring */ - owner = xgene_derive_ring_owner(pdata); - ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU, cpu_bufnum++); - rx_ring = xgene_enet_create_desc_ring(ndev, ring_num++, - RING_CFGSIZE_16KB, ring_id); - if (!rx_ring) { - ret = -ENOMEM; - goto err; - } - - /* allocate buffer pool for receiving packets */ - owner = xgene_derive_ring_owner(pdata); - ring_id = xgene_enet_get_ring_id(owner, bp_bufnum++); - buf_pool = xgene_enet_create_desc_ring(ndev, ring_num++, - RING_CFGSIZE_2KB, ring_id); - if (!buf_pool) { - ret = -ENOMEM; - goto err; - } - - rx_ring->nbufpool = NUM_BUFPOOL; - rx_ring->buf_pool = buf_pool; - rx_ring->irq = pdata->rx_irq; - if 
(!pdata->cq_cnt) { - snprintf(rx_ring->irq_name, IRQ_ID_SIZE, "%s-rx-txc", - ndev->name); - } else { - snprintf(rx_ring->irq_name, IRQ_ID_SIZE, "%s-rx", ndev->name); - } - buf_pool->rx_skb = devm_kcalloc(dev, buf_pool->slots, - sizeof(struct sk_buff *), GFP_KERNEL); - if (!buf_pool->rx_skb) { - ret = -ENOMEM; - goto err; - } + int i, ret, size; - buf_pool->dst_ring_num = xgene_enet_dst_ring_num(buf_pool); - rx_ring->buf_pool = buf_pool; - pdata->rx_ring = rx_ring; + for (i = 0; i < pdata->rxq_cnt; i++) { + /* allocate rx descriptor ring */ + owner = xgene_derive_ring_owner(pdata); + ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU, cpu_bufnum++); + rx_ring = xgene_enet_create_desc_ring(ndev, ring_num++, + RING_CFGSIZE_16KB, + ring_id); + if (!rx_ring) { + ret = -ENOMEM; + goto err; + } - /* allocate tx descriptor ring */ - owner = xgene_derive_ring_owner(pdata); - ring_id = xgene_enet_get_ring_id(owner, eth_bufnum++); - tx_ring = xgene_enet_create_desc_ring(ndev, ring_num++, - RING_CFGSIZE_16KB, ring_id); - if (!tx_ring) { - ret = -ENOMEM; - goto err; - } + /* allocate buffer pool for receiving packets */ + owner = xgene_derive_ring_owner(pdata); + ring_id = xgene_enet_get_ring_id(owner, bp_bufnum++); + buf_pool = xgene_enet_create_desc_ring(ndev, ring_num++, + RING_CFGSIZE_2KB, + ring_id); + if (!buf_pool) { + ret = -ENOMEM; + goto err; + } - size = (tx_ring->slots / 2) * sizeof(__le64) * MAX_EXP_BUFFS; - tx_ring->exp_bufs = dma_zalloc_coherent(dev, size, &dma_exp_bufs, + rx_ring->nbufpool = NUM_BUFPOOL; + rx_ring->buf_pool = buf_pool; + rx_ring->irq = pdata->irqs[i]; + if (!pdata->cq_cnt) { + snprintf(rx_ring->irq_name, IRQ_ID_SIZE, "%s-rx-txc", + ndev->name); + } else { + snprintf(rx_ring->irq_name, IRQ_ID_SIZE, "%s-rx%d", + ndev->name, i); + } + buf_pool->rx_skb = devm_kcalloc(dev, buf_pool->slots, + sizeof(struct sk_buff *), GFP_KERNEL); - if (!tx_ring->exp_bufs) { - ret = -ENOMEM; - goto err; - } + if (!buf_pool->rx_skb) { + ret = -ENOMEM; + goto err; + } - pdata->tx_ring = tx_ring; + buf_pool->dst_ring_num = xgene_enet_dst_ring_num(buf_pool); + rx_ring->buf_pool = buf_pool; + pdata->rx_ring[i] = rx_ring; + } - if (!pdata->cq_cnt) { - cp_ring = pdata->rx_ring; - } else { - /* allocate tx completion descriptor ring */ - ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU, cpu_bufnum++); - cp_ring = xgene_enet_create_desc_ring(ndev, ring_num++, + for (i = 0; i < pdata->txq_cnt; i++) { + /* allocate tx descriptor ring */ + owner = xgene_derive_ring_owner(pdata); + ring_id = xgene_enet_get_ring_id(owner, eth_bufnum++); + tx_ring = xgene_enet_create_desc_ring(ndev, ring_num++, RING_CFGSIZE_16KB, ring_id); - if (!cp_ring) { + if (!tx_ring) { ret = -ENOMEM; goto err; } - cp_ring->irq = pdata->txc_irq; - snprintf(cp_ring->irq_name, IRQ_ID_SIZE, "%s-txc", ndev->name); - } - cp_ring->cp_skb = devm_kcalloc(dev, tx_ring->slots, - sizeof(struct sk_buff *), GFP_KERNEL); - if (!cp_ring->cp_skb) { - ret = -ENOMEM; - goto err; - } + size = (tx_ring->slots / 2) * sizeof(__le64) * MAX_EXP_BUFFS; + tx_ring->exp_bufs = dma_zalloc_coherent(dev, size, + &dma_exp_bufs, + GFP_KERNEL); + if (!tx_ring->exp_bufs) { + ret = -ENOMEM; + goto err; + } - size = sizeof(dma_addr_t) * MAX_SKB_FRAGS; - cp_ring->frag_dma_addr = devm_kcalloc(dev, tx_ring->slots, - size, GFP_KERNEL); - if (!cp_ring->frag_dma_addr) { - devm_kfree(dev, cp_ring->cp_skb); - ret = -ENOMEM; - goto err; - } + pdata->tx_ring[i] = tx_ring; + + if (!pdata->cq_cnt) { + cp_ring = pdata->rx_ring[i]; + } else { + /* allocate tx completion descriptor ring */ 
+ ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU, + cpu_bufnum++); + cp_ring = xgene_enet_create_desc_ring(ndev, ring_num++, + RING_CFGSIZE_16KB, + ring_id); + if (!cp_ring) { + ret = -ENOMEM; + goto err; + } - pdata->tx_ring->cp_ring = cp_ring; - pdata->tx_ring->dst_ring_num = xgene_enet_dst_ring_num(cp_ring); + cp_ring->irq = pdata->irqs[pdata->rxq_cnt + i]; + cp_ring->index = i; + snprintf(cp_ring->irq_name, IRQ_ID_SIZE, "%s-txc%d", + ndev->name, i); + } - pdata->tx_qcnt_hi = pdata->tx_ring->slots - 128; + cp_ring->cp_skb = devm_kcalloc(dev, tx_ring->slots, + sizeof(struct sk_buff *), + GFP_KERNEL); + if (!cp_ring->cp_skb) { + ret = -ENOMEM; + goto err; + } + + size = sizeof(dma_addr_t) * MAX_SKB_FRAGS; + cp_ring->frag_dma_addr = devm_kcalloc(dev, tx_ring->slots, + size, GFP_KERNEL); + if (!cp_ring->frag_dma_addr) { + devm_kfree(dev, cp_ring->cp_skb); + ret = -ENOMEM; + goto err; + } + + tx_ring->cp_ring = cp_ring; + tx_ring->dst_ring_num = xgene_enet_dst_ring_num(cp_ring); + } + + pdata->ring_ops->coalesce(pdata->tx_ring[0]); + pdata->tx_qcnt_hi = pdata->tx_ring[0]->slots - 128; return 0; @@ -1160,6 +1218,32 @@ static int xgene_get_rx_delay(struct xgene_enet_pdata *pdata) return 0; } +static int xgene_enet_get_irqs(struct xgene_enet_pdata *pdata) +{ + struct platform_device *pdev = pdata->pdev; + struct device *dev = &pdev->dev; + int i, ret, max_irqs; + + if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII) + max_irqs = 1; + else if (pdata->phy_mode == PHY_INTERFACE_MODE_SGMII) + max_irqs = 2; + else + max_irqs = XGENE_MAX_ENET_IRQ; + + for (i = 0; i < max_irqs; i++) { + ret = platform_get_irq(pdev, i); + if (ret <= 0) { + dev_err(dev, "Unable to get ENET IRQ\n"); + ret = ret ? : -ENXIO; + return ret; + } + pdata->irqs[i] = ret; + } + + return 0; +} + static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata) { struct platform_device *pdev; @@ -1241,25 +1325,9 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata) if (ret) return ret; - ret = platform_get_irq(pdev, 0); - if (ret <= 0) { - dev_err(dev, "Unable to get ENET Rx IRQ\n"); - ret = ret ? 
: -ENXIO; + ret = xgene_enet_get_irqs(pdata); + if (ret) return ret; - } - pdata->rx_irq = ret; - - if (pdata->phy_mode != PHY_INTERFACE_MODE_RGMII) { - ret = platform_get_irq(pdev, 1); - if (ret <= 0) { - pdata->cq_cnt = 0; - dev_info(dev, "Unable to get Tx completion IRQ," - "using Rx IRQ instead\n"); - } else { - pdata->cq_cnt = XGENE_MAX_TXC_RINGS; - pdata->txc_irq = ret; - } - } pdata->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(pdata->clk)) { @@ -1272,6 +1340,7 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata) else base_addr = pdata->base_addr; pdata->eth_csr_addr = base_addr + BLOCK_ETH_CSR_OFFSET; + pdata->cle.base = base_addr + BLOCK_ETH_CLE_CSR_OFFSET; pdata->eth_ring_if_addr = base_addr + BLOCK_ETH_RING_IF_OFFSET; pdata->eth_diag_csr_addr = base_addr + BLOCK_ETH_DIAG_CSR_OFFSET; if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII || @@ -1292,10 +1361,11 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata) static int xgene_enet_init_hw(struct xgene_enet_pdata *pdata) { + struct xgene_enet_cle *enet_cle = &pdata->cle; struct net_device *ndev = pdata->ndev; struct xgene_enet_desc_ring *buf_pool; u16 dst_ring_num; - int ret; + int i, ret; ret = pdata->port_ops->reset(pdata); if (ret) @@ -1308,16 +1378,36 @@ static int xgene_enet_init_hw(struct xgene_enet_pdata *pdata) } /* setup buffer pool */ - buf_pool = pdata->rx_ring->buf_pool; - xgene_enet_init_bufpool(buf_pool); - ret = xgene_enet_refill_bufpool(buf_pool, pdata->rx_buff_cnt); - if (ret) { - xgene_enet_delete_desc_rings(pdata); - return ret; + for (i = 0; i < pdata->rxq_cnt; i++) { + buf_pool = pdata->rx_ring[i]->buf_pool; + xgene_enet_init_bufpool(buf_pool); + ret = xgene_enet_refill_bufpool(buf_pool, pdata->rx_buff_cnt); + if (ret) { + xgene_enet_delete_desc_rings(pdata); + return ret; + } + } + + dst_ring_num = xgene_enet_dst_ring_num(pdata->rx_ring[0]); + buf_pool = pdata->rx_ring[0]->buf_pool; + if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) { + /* Initialize and Enable PreClassifier Tree */ + enet_cle->max_nodes = 512; + enet_cle->max_dbptrs = 1024; + enet_cle->parsers = 3; + enet_cle->active_parser = PARSER_ALL; + enet_cle->ptree.start_node = 0; + enet_cle->ptree.start_dbptr = 0; + enet_cle->jump_bytes = 8; + ret = pdata->cle_ops->cle_init(pdata); + if (ret) { + netdev_err(ndev, "Preclass Tree init error\n"); + return ret; + } + } else { + pdata->port_ops->cle_bypass(pdata, dst_ring_num, buf_pool->id); } - dst_ring_num = xgene_enet_dst_ring_num(pdata->rx_ring); - pdata->port_ops->cle_bypass(pdata, dst_ring_num, buf_pool->id); pdata->mac_ops->init(pdata); return ret; @@ -1330,16 +1420,26 @@ static void xgene_enet_setup_ops(struct xgene_enet_pdata *pdata) pdata->mac_ops = &xgene_gmac_ops; pdata->port_ops = &xgene_gport_ops; pdata->rm = RM3; + pdata->rxq_cnt = 1; + pdata->txq_cnt = 1; + pdata->cq_cnt = 0; break; case PHY_INTERFACE_MODE_SGMII: pdata->mac_ops = &xgene_sgmac_ops; pdata->port_ops = &xgene_sgport_ops; pdata->rm = RM1; + pdata->rxq_cnt = 1; + pdata->txq_cnt = 1; + pdata->cq_cnt = 1; break; default: pdata->mac_ops = &xgene_xgmac_ops; pdata->port_ops = &xgene_xgport_ops; + pdata->cle_ops = &xgene_cle3in_ops; pdata->rm = RM0; + pdata->rxq_cnt = XGENE_NUM_RX_RING; + pdata->txq_cnt = XGENE_NUM_TX_RING; + pdata->cq_cnt = XGENE_NUM_TXC_RING; break; } @@ -1393,12 +1493,16 @@ static void xgene_enet_setup_ops(struct xgene_enet_pdata *pdata) static void xgene_enet_napi_add(struct xgene_enet_pdata *pdata) { struct napi_struct *napi; + int i; - napi = &pdata->rx_ring->napi; - 
netif_napi_add(pdata->ndev, napi, xgene_enet_napi, NAPI_POLL_WEIGHT); + for (i = 0; i < pdata->rxq_cnt; i++) { + napi = &pdata->rx_ring[i]->napi; + netif_napi_add(pdata->ndev, napi, xgene_enet_napi, + NAPI_POLL_WEIGHT); + } - if (pdata->cq_cnt) { - napi = &pdata->tx_ring->cp_ring->napi; + for (i = 0; i < pdata->cq_cnt; i++) { + napi = &pdata->tx_ring[i]->cp_ring->napi; netif_napi_add(pdata->ndev, napi, xgene_enet_napi, NAPI_POLL_WEIGHT); } @@ -1407,12 +1511,15 @@ static void xgene_enet_napi_add(struct xgene_enet_pdata *pdata) static void xgene_enet_napi_del(struct xgene_enet_pdata *pdata) { struct napi_struct *napi; + int i; - napi = &pdata->rx_ring->napi; - netif_napi_del(napi); + for (i = 0; i < pdata->rxq_cnt; i++) { + napi = &pdata->rx_ring[i]->napi; + netif_napi_del(napi); + } - if (pdata->cq_cnt) { - napi = &pdata->tx_ring->cp_ring->napi; + for (i = 0; i < pdata->cq_cnt; i++) { + napi = &pdata->tx_ring[i]->cp_ring->napi; netif_napi_del(napi); } } @@ -1426,7 +1533,8 @@ static int xgene_enet_probe(struct platform_device *pdev) const struct of_device_id *of_id; int ret; - ndev = alloc_etherdev(sizeof(struct xgene_enet_pdata)); + ndev = alloc_etherdev_mqs(sizeof(struct xgene_enet_pdata), + XGENE_NUM_RX_RING, XGENE_NUM_TX_RING); if (!ndev) return -ENOMEM; diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h index 70d5b62c125a..175d18890c7a 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h @@ -25,6 +25,7 @@ #include <linux/acpi.h> #include <linux/clk.h> #include <linux/efi.h> +#include <linux/irq.h> #include <linux/io.h> #include <linux/of_platform.h> #include <linux/of_net.h> @@ -35,6 +36,7 @@ #include <linux/if_vlan.h> #include <linux/phy.h> #include "xgene_enet_hw.h" +#include "xgene_enet_cle.h" #include "xgene_enet_ring2.h" #define XGENE_DRV_VERSION "v1.0" @@ -47,6 +49,11 @@ #define XGENE_ENET_MSS 1448 #define XGENE_MIN_ENET_FRAME_SIZE 60 +#define XGENE_MAX_ENET_IRQ 8 +#define XGENE_NUM_RX_RING 4 +#define XGENE_NUM_TX_RING 4 +#define XGENE_NUM_TXC_RING 4 + #define START_CPU_BUFNUM_0 0 #define START_ETH_BUFNUM_0 2 #define START_BP_BUFNUM_0 0x22 @@ -71,7 +78,6 @@ #define X2_START_RING_NUM_1 256 #define IRQ_ID_SIZE 16 -#define XGENE_MAX_TXC_RINGS 1 #define PHY_POLL_LINK_ON (10 * HZ) #define PHY_POLL_LINK_OFF (PHY_POLL_LINK_ON / 5) @@ -101,6 +107,7 @@ struct xgene_enet_desc_ring { void *irq_mbox_addr; u16 dst_ring_num; u8 nbufpool; + u8 index; struct sk_buff *(*rx_skb); struct sk_buff *(*cp_skb); dma_addr_t *frag_dma_addr; @@ -142,6 +149,11 @@ struct xgene_ring_ops { void (*clear)(struct xgene_enet_desc_ring *); void (*wr_cmd)(struct xgene_enet_desc_ring *, int); u32 (*len)(struct xgene_enet_desc_ring *); + void (*coalesce)(struct xgene_enet_desc_ring *); +}; + +struct xgene_cle_ops { + int (*cle_init)(struct xgene_enet_pdata *pdata); }; /* ethernet private data */ @@ -153,15 +165,16 @@ struct xgene_enet_pdata { struct clk *clk; struct platform_device *pdev; enum xgene_enet_id enet_id; - struct xgene_enet_desc_ring *tx_ring; - struct xgene_enet_desc_ring *rx_ring; - u16 tx_level; - u16 txc_level; + struct xgene_enet_desc_ring *tx_ring[XGENE_NUM_TX_RING]; + struct xgene_enet_desc_ring *rx_ring[XGENE_NUM_RX_RING]; + u16 tx_level[XGENE_NUM_TX_RING]; + u16 txc_level[XGENE_NUM_TX_RING]; char *dev_name; u32 rx_buff_cnt; u32 tx_qcnt_hi; - u32 rx_irq; - u32 txc_irq; + u32 irqs[XGENE_MAX_ENET_IRQ]; + u8 rxq_cnt; + u8 txq_cnt; u8 cq_cnt; void __iomem *eth_csr_addr; void 
__iomem *eth_ring_if_addr; @@ -173,10 +186,12 @@ struct xgene_enet_pdata { void __iomem *ring_cmd_addr; int phy_mode; enum xgene_enet_rm rm; + struct xgene_enet_cle cle; struct rtnl_link_stats64 stats; const struct xgene_mac_ops *mac_ops; const struct xgene_port_ops *port_ops; struct xgene_ring_ops *ring_ops; + struct xgene_cle_ops *cle_ops; struct delayed_work link_work; u32 port_id; u8 cpu_bufnum; @@ -228,6 +243,13 @@ static inline struct device *ndev_to_dev(struct net_device *ndev) return ndev->dev.parent; } +static inline u16 xgene_enet_dst_ring_num(struct xgene_enet_desc_ring *ring) +{ + struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev); + + return ((u16)pdata->rm << 10) | ring->num; +} + void xgene_enet_set_ethtool_ops(struct net_device *netdev); #endif /* __XGENE_ENET_MAIN_H__ */ diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c b/drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c index 0b6896bb351e..2b76732add5d 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c @@ -190,6 +190,17 @@ static u32 xgene_enet_ring_len(struct xgene_enet_desc_ring *ring) return num_msgs; } +static void xgene_enet_setup_coalescing(struct xgene_enet_desc_ring *ring) +{ + u32 data = 0x7777; + + xgene_enet_ring_wr32(ring, CSR_PBM_COAL, 0x8e); + xgene_enet_ring_wr32(ring, CSR_PBM_CTICK1, data); + xgene_enet_ring_wr32(ring, CSR_PBM_CTICK2, data << 16); + xgene_enet_ring_wr32(ring, CSR_THRESHOLD0_SET1, 0x40); + xgene_enet_ring_wr32(ring, CSR_THRESHOLD1_SET1, 0x80); +} + struct xgene_ring_ops xgene_ring2_ops = { .num_ring_config = X2_NUM_RING_CONFIG, .num_ring_id_shift = 13, @@ -197,4 +208,5 @@ struct xgene_ring_ops xgene_ring2_ops = { .clear = xgene_enet_clear_ring, .wr_cmd = xgene_enet_wr_cmd, .len = xgene_enet_ring_len, + .coalesce = xgene_enet_setup_coalescing, }; diff --git a/drivers/net/ethernet/arc/Kconfig b/drivers/net/ethernet/arc/Kconfig index 52a6b16f57d2..689045186064 100644 --- a/drivers/net/ethernet/arc/Kconfig +++ b/drivers/net/ethernet/arc/Kconfig @@ -34,9 +34,9 @@ config EMAC_ROCKCHIP select ARC_EMAC_CORE depends on OF_IRQ && OF_NET && REGULATOR && HAS_DMA ---help--- - Support for Rockchip RK3066/RK3188 EMAC ethernet controllers. + Support for Rockchip RK3036/RK3066/RK3188 EMAC ethernet controllers. This selects Rockchip SoC glue layer support for the - emac device driver. This driver is used for RK3066/RK3188 + emac device driver. This driver is used for RK3036/RK3066/RK3188 EMAC ethernet controller. 
endif # NET_VENDOR_ARC diff --git a/drivers/net/ethernet/arc/emac_rockchip.c b/drivers/net/ethernet/arc/emac_rockchip.c index c31c7407b753..85e821ccfcd2 100644 --- a/drivers/net/ethernet/arc/emac_rockchip.c +++ b/drivers/net/ethernet/arc/emac_rockchip.c @@ -25,17 +25,13 @@ #include "emac.h" #define DRV_NAME "rockchip_emac" -#define DRV_VERSION "1.0" - -#define GRF_MODE_MII (1UL << 0) -#define GRF_MODE_RMII (0UL << 0) -#define GRF_SPEED_10M (0UL << 1) -#define GRF_SPEED_100M (1UL << 1) -#define GRF_SPEED_ENABLE_BIT (1UL << 17) -#define GRF_MODE_ENABLE_BIT (1UL << 16) +#define DRV_VERSION "1.1" struct emac_rockchip_soc_data { - int grf_offset; + unsigned int grf_offset; + unsigned int grf_mode_offset; + unsigned int grf_speed_offset; + bool need_div_macclk; }; struct rockchip_priv_data { @@ -44,23 +40,22 @@ struct rockchip_priv_data { const struct emac_rockchip_soc_data *soc_data; struct regulator *regulator; struct clk *refclk; + struct clk *macclk; }; static void emac_rockchip_set_mac_speed(void *priv, unsigned int speed) { struct rockchip_priv_data *emac = priv; + u32 speed_offset = emac->soc_data->grf_speed_offset; u32 data; int err = 0; - /* write-enable bits */ - data = GRF_SPEED_ENABLE_BIT; - switch(speed) { case 10: - data |= GRF_SPEED_10M; + data = (1 << (speed_offset + 16)) | (0 << speed_offset); break; case 100: - data |= GRF_SPEED_100M; + data = (1 << (speed_offset + 16)) | (1 << speed_offset); break; default: pr_err("speed %u not supported\n", speed); @@ -72,14 +67,25 @@ static void emac_rockchip_set_mac_speed(void *priv, unsigned int speed) pr_err("unable to apply speed %u to grf (%d)\n", speed, err); } -static const struct emac_rockchip_soc_data emac_rockchip_dt_data[] = { - { .grf_offset = 0x154 }, /* rk3066 */ - { .grf_offset = 0x0a4 }, /* rk3188 */ +static const struct emac_rockchip_soc_data emac_rk3036_emac_data = { + .grf_offset = 0x140, .grf_mode_offset = 8, + .grf_speed_offset = 9, .need_div_macclk = 1, +}; + +static const struct emac_rockchip_soc_data emac_rk3066_emac_data = { + .grf_offset = 0x154, .grf_mode_offset = 0, + .grf_speed_offset = 1, .need_div_macclk = 0, +}; + +static const struct emac_rockchip_soc_data emac_rk3188_emac_data = { + .grf_offset = 0x0a4, .grf_mode_offset = 0, + .grf_speed_offset = 1, .need_div_macclk = 0, }; static const struct of_device_id emac_rockchip_dt_ids[] = { - { .compatible = "rockchip,rk3066-emac", .data = &emac_rockchip_dt_data[0] }, - { .compatible = "rockchip,rk3188-emac", .data = &emac_rockchip_dt_data[1] }, + { .compatible = "rockchip,rk3036-emac", .data = &emac_rk3036_emac_data }, + { .compatible = "rockchip,rk3066-emac", .data = &emac_rk3066_emac_data }, + { .compatible = "rockchip,rk3188-emac", .data = &emac_rk3188_emac_data }, { /* Sentinel */ } }; @@ -110,7 +116,7 @@ static int emac_rockchip_probe(struct platform_device *pdev) interface = of_get_phy_mode(dev->of_node); - /* RK3066 and RK3188 SoCs only support RMII */ + /* RK3036/RK3066/RK3188 SoCs only support RMII */ if (interface != PHY_INTERFACE_MODE_RMII) { dev_err(dev, "unsupported phy interface mode %d\n", interface); err = -ENOTSUPP; @@ -164,15 +170,12 @@ static int emac_rockchip_probe(struct platform_device *pdev) } } - err = arc_emac_probe(ndev, interface); - if (err) - goto out_regulator_disable; - - /* write-enable bits */ - data = GRF_MODE_ENABLE_BIT | GRF_SPEED_ENABLE_BIT; - - data |= GRF_SPEED_100M; - data |= GRF_MODE_RMII; + /* Set speed 100M */ + data = (1 << (priv->soc_data->grf_speed_offset + 16)) | + (1 << priv->soc_data->grf_speed_offset); + /* Set RMII 
mode */ + data |= (1 << (priv->soc_data->grf_mode_offset + 16)) | + (0 << priv->soc_data->grf_mode_offset); err = regmap_write(priv->grf, priv->soc_data->grf_offset, data); if (err) { @@ -184,6 +187,33 @@ static int emac_rockchip_probe(struct platform_device *pdev) err = clk_set_rate(priv->refclk, 50000000); if (err) dev_err(dev, "failed to change reference clock rate (%d)\n", err); + + if (priv->soc_data->need_div_macclk) { + priv->macclk = devm_clk_get(dev, "macclk"); + if (IS_ERR(priv->macclk)) { + dev_err(dev, "failed to retrieve mac clock (%ld)\n", PTR_ERR(priv->macclk)); + err = PTR_ERR(priv->macclk); + goto out_regulator_disable; + } + + err = clk_prepare_enable(priv->macclk); + if (err) { + dev_err(dev, "failed to enable mac clock (%d)\n", err); + goto out_regulator_disable; + } + + /* RMII TX/RX always needs a rate of 25MHz */ + err = clk_set_rate(priv->macclk, 25000000); + if (err) + dev_err(dev, "failed to change mac clock rate (%d)\n", err); + } + + err = arc_emac_probe(ndev, interface); + if (err) { + dev_err(dev, "failed to probe arc emac (%d)\n", err); + goto out_regulator_disable; + } + return 0; out_regulator_disable: diff --git a/drivers/net/ethernet/atheros/alx/hw.c b/drivers/net/ethernet/atheros/alx/hw.c index 7712f068f6d4..1fe35e453d43 100644 --- a/drivers/net/ethernet/atheros/alx/hw.c +++ b/drivers/net/ethernet/atheros/alx/hw.c @@ -958,13 +958,13 @@ void alx_configure_basic(struct alx_hw *hw) alx_write_mem32(hw, ALX_TINT_TPD_THRSHLD, hw->ith_tpd); alx_write_mem32(hw, ALX_TINT_TIMER, hw->imt); - raw_mtu = hw->mtu + ETH_HLEN; - alx_write_mem32(hw, ALX_MTU, raw_mtu + 8); - if (raw_mtu > ALX_MTU_JUMBO_TH) + raw_mtu = ALX_RAW_MTU(hw->mtu); + alx_write_mem32(hw, ALX_MTU, raw_mtu); + if (raw_mtu > (ALX_MTU_JUMBO_TH + ETH_FCS_LEN + VLAN_HLEN)) hw->rx_ctrl &= ~ALX_MAC_CTRL_FAST_PAUSE; - if ((raw_mtu + 8) < ALX_TXQ1_JUMBO_TSO_TH) - val = (raw_mtu + 8 + 7) >> 3; + if (raw_mtu < ALX_TXQ1_JUMBO_TSO_TH) + val = (raw_mtu + 7) >> 3; else val = ALX_TXQ1_JUMBO_TSO_TH >> 3; alx_write_mem32(hw, ALX_TXQ1, val | ALX_TXQ1_ERRLGPKT_DROP_EN); diff --git a/drivers/net/ethernet/atheros/alx/hw.h b/drivers/net/ethernet/atheros/alx/hw.h index 15548802d6f8..f289c05f5cb4 100644 --- a/drivers/net/ethernet/atheros/alx/hw.h +++ b/drivers/net/ethernet/atheros/alx/hw.h @@ -37,6 +37,7 @@ #include <linux/types.h> #include <linux/mdio.h> #include <linux/pci.h> +#include <linux/if_vlan.h> #include "reg.h" /* Transmit Packet Descriptor, contains 4 32-bit words.
@@ -343,12 +344,14 @@ struct alx_rrd { ALX_RSS_HASH_TYPE_IPV4_TCP | \ ALX_RSS_HASH_TYPE_IPV6 | \ ALX_RSS_HASH_TYPE_IPV6_TCP) -#define ALX_DEF_RXBUF_SIZE 1536 +#define ALX_FRAME_PAD 16 +#define ALX_RAW_MTU(_mtu) (_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN) +#define ALX_MAX_FRAME_LEN(_mtu) (ALIGN((ALX_RAW_MTU(_mtu) + ALX_FRAME_PAD), 8)) +#define ALX_DEF_RXBUF_SIZE ALX_MAX_FRAME_LEN(1500) #define ALX_MAX_JUMBO_PKT_SIZE (9*1024) #define ALX_MAX_TSO_PKT_SIZE (7*1024) #define ALX_MAX_FRAME_SIZE ALX_MAX_JUMBO_PKT_SIZE -#define ALX_MIN_FRAME_SIZE 68 -#define ALX_RAW_MTU(_mtu) (_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN) +#define ALX_MIN_FRAME_SIZE (ETH_ZLEN + ETH_FCS_LEN + VLAN_HLEN) #define ALX_MAX_RX_QUEUES 8 #define ALX_MAX_TX_QUEUES 4 diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c index d3763bc2c561..55b118e876fd 100644 --- a/drivers/net/ethernet/atheros/alx/main.c +++ b/drivers/net/ethernet/atheros/alx/main.c @@ -704,7 +704,7 @@ static int alx_init_sw(struct alx_priv *alx) hw->smb_timer = 400; hw->mtu = alx->dev->mtu; - alx->rxbuf_size = ALIGN(ALX_RAW_MTU(hw->mtu), 8); + alx->rxbuf_size = ALX_MAX_FRAME_LEN(hw->mtu); alx->tx_ringsz = 256; alx->rx_ringsz = 512; hw->imt = 200; @@ -805,7 +805,7 @@ static void alx_reinit(struct alx_priv *alx) static int alx_change_mtu(struct net_device *netdev, int mtu) { struct alx_priv *alx = netdev_priv(netdev); - int max_frame = mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; + int max_frame = ALX_MAX_FRAME_LEN(mtu); if ((max_frame < ALX_MIN_FRAME_SIZE) || (max_frame > ALX_MAX_FRAME_SIZE)) @@ -816,8 +816,7 @@ static int alx_change_mtu(struct net_device *netdev, int mtu) netdev->mtu = mtu; alx->hw.mtu = mtu; - alx->rxbuf_size = mtu > ALX_DEF_RXBUF_SIZE ? - ALIGN(max_frame, 8) : ALX_DEF_RXBUF_SIZE; + alx->rxbuf_size = max(max_frame, ALX_DEF_RXBUF_SIZE); netdev_update_features(netdev); if (netif_running(netdev)) alx_reinit(alx); diff --git a/drivers/net/ethernet/aurora/nb8800.c b/drivers/net/ethernet/aurora/nb8800.c index ecc4a334c507..f71ab2647a3b 100644 --- a/drivers/net/ethernet/aurora/nb8800.c +++ b/drivers/net/ethernet/aurora/nb8800.c @@ -302,7 +302,7 @@ static int nb8800_poll(struct napi_struct *napi, int budget) nb8800_tx_done(dev); again: - while (work < budget) { + do { struct nb8800_rx_buf *rxb; unsigned int len; @@ -330,7 +330,7 @@ again: rxd->report = 0; last = next; work++; - } + } while (work < budget); if (work) { priv->rx_descs[last].desc.config |= DESC_EOC; diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig index 8550df189ceb..19f7cd02e085 100644 --- a/drivers/net/ethernet/broadcom/Kconfig +++ b/drivers/net/ethernet/broadcom/Kconfig @@ -151,8 +151,11 @@ config BNX2X_VXLAN config BGMAC tristate "BCMA bus GBit core support" - depends on BCMA_HOST_SOC && HAS_DMA && (BCM47XX || ARCH_BCM_5301X) + depends on BCMA && BCMA_HOST_SOC + depends on HAS_DMA + depends on BCM47XX || ARCH_BCM_5301X || COMPILE_TEST select PHYLIB + select FIXED_PHY ---help--- This driver supports GBit MAC and BCM4706 GBit MAC cores on BCMA bus. They can be found on BCM47xx SoCs and provide gigabit ethernet. 
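[Editor's note] As a quick check of the new ALX sizing macros above, here is the arithmetic for the default 1500-byte MTU (editor's worked example, not driver code):

	ALX_RAW_MTU(1500)       = 1500 + ETH_HLEN (14) + ETH_FCS_LEN (4) + VLAN_HLEN (4) = 1522
	ALX_MAX_FRAME_LEN(1500) = ALIGN(1522 + ALX_FRAME_PAD (16), 8) = ALIGN(1538, 8)   = 1544 = ALX_DEF_RXBUF_SIZE
	ALX_MIN_FRAME_SIZE      = ETH_ZLEN (60) + ETH_FCS_LEN (4) + VLAN_HLEN (4)        = 68

The last line reproduces the old hard-coded 68, so the symbolic rewrite preserves the minimum frame check; the gain is that alx_change_mtu() can now clamp the receive buffer with max(max_frame, ALX_DEF_RXBUF_SIZE) instead of the previous conditional expression.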
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c index a3b1c07ae0af..74f0a37c4eb6 100644 --- a/drivers/net/ethernet/broadcom/b44.c +++ b/drivers/net/ethernet/broadcom/b44.c @@ -2263,24 +2263,16 @@ static int b44_register_phy_one(struct b44 *bp) mii_bus->parent = sdev->dev; mii_bus->phy_mask = ~(1 << bp->phy_addr); snprintf(mii_bus->id, MII_BUS_ID_SIZE, "%x", instance); - mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); - if (!mii_bus->irq) { - dev_err(sdev->dev, "mii_bus irq allocation failed\n"); - err = -ENOMEM; - goto err_out_mdiobus; - } - - memset(mii_bus->irq, PHY_POLL, sizeof(int) * PHY_MAX_ADDR); bp->mii_bus = mii_bus; err = mdiobus_register(mii_bus); if (err) { dev_err(sdev->dev, "failed to register MII bus\n"); - goto err_out_mdiobus_irq; + goto err_out_mdiobus; } - if (!bp->mii_bus->phy_map[bp->phy_addr] && + if (!mdiobus_is_registered_device(bp->mii_bus, bp->phy_addr) && (sprom->boardflags_lo & (B44_BOARDFLAG_ROBO | B44_BOARDFLAG_ADM))) { dev_info(sdev->dev, @@ -2313,19 +2305,15 @@ static int b44_register_phy_one(struct b44 *bp) bp->phydev = phydev; bp->old_link = 0; - bp->phy_addr = phydev->addr; + bp->phy_addr = phydev->mdio.addr; - dev_info(sdev->dev, "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n", - phydev->drv->name, dev_name(&phydev->dev)); + phy_attached_info(phydev); return 0; err_out_mdiobus_unregister: mdiobus_unregister(mii_bus); -err_out_mdiobus_irq: - kfree(mii_bus->irq); - err_out_mdiobus: mdiobus_free(mii_bus); @@ -2339,7 +2327,6 @@ static void b44_unregister_phy_one(struct b44 *bp) phy_disconnect(bp->phydev); mdiobus_unregister(mii_bus); - kfree(mii_bus->irq); mdiobus_free(mii_bus); } diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c index a54bafad3538..87c6b5bdd616 100644 --- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c +++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c @@ -908,8 +908,7 @@ static int bcm_enet_open(struct net_device *dev) else phydev->advertising &= ~SUPPORTED_Pause; - dev_info(kdev, "attached PHY at address %d [%s]\n", - phydev->addr, phydev->drv->name); + phy_attached_info(phydev); priv->old_link = 0; priv->old_duplex = -1; @@ -1849,17 +1848,8 @@ static int bcm_enet_probe(struct platform_device *pdev) * if a slave is not present on hw */ bus->phy_mask = ~(1 << priv->phy_id); - bus->irq = devm_kzalloc(&pdev->dev, sizeof(int) * PHY_MAX_ADDR, - GFP_KERNEL); - if (!bus->irq) { - ret = -ENOMEM; - goto out_free_mdio; - } - if (priv->has_phy_interrupt) bus->irq[priv->phy_id] = priv->phy_interrupt; - else - bus->irq[priv->phy_id] = PHY_POLL; ret = mdiobus_register(bus); if (ret) { diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c index 28f7610b03fe..230f8e6209e5 100644 --- a/drivers/net/ethernet/broadcom/bgmac.c +++ b/drivers/net/ethernet/broadcom/bgmac.c @@ -26,6 +26,17 @@ static const struct bcma_device_id bgmac_bcma_tbl[] = { }; MODULE_DEVICE_TABLE(bcma, bgmac_bcma_tbl); +static inline bool bgmac_is_bcm4707_family(struct bgmac *bgmac) +{ + switch (bgmac->core->bus->chipinfo.id) { + case BCMA_CHIP_ID_BCM4707: + case BCMA_CHIP_ID_BCM53018: + return true; + default: + return false; + } +} + static bool bgmac_wait_value(struct bcma_device *core, u16 reg, u32 mask, u32 value, int timeout) { @@ -466,6 +477,11 @@ static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring, len -= ETH_FCS_LEN; skb = build_skb(buf, BGMAC_RX_ALLOC_SIZE); + if (unlikely(!skb)) { + bgmac_err(bgmac, 
"build_skb failed\n"); + put_page(virt_to_head_page(buf)); + break; + } skb_put(skb, BGMAC_RX_FRAME_OFFSET + BGMAC_RX_BUF_OFFSET + len); skb_pull(skb, BGMAC_RX_FRAME_OFFSET + @@ -982,11 +998,9 @@ static void bgmac_mac_speed(struct bgmac *bgmac) static void bgmac_miiconfig(struct bgmac *bgmac) { struct bcma_device *core = bgmac->core; - struct bcma_chipinfo *ci = &core->bus->chipinfo; u8 imode; - if (ci->id == BCMA_CHIP_ID_BCM4707 || - ci->id == BCMA_CHIP_ID_BCM53018) { + if (bgmac_is_bcm4707_family(bgmac)) { bcma_awrite32(core, BCMA_IOCTL, bcma_aread32(core, BCMA_IOCTL) | 0x40 | BGMAC_BCMA_IOCTL_SW_CLKEN); @@ -1050,9 +1064,7 @@ static void bgmac_chip_reset(struct bgmac *bgmac) } /* Request Misc PLL for corerev > 2 */ - if (core->id.rev > 2 && - ci->id != BCMA_CHIP_ID_BCM4707 && - ci->id != BCMA_CHIP_ID_BCM53018) { + if (core->id.rev > 2 && !bgmac_is_bcm4707_family(bgmac)) { bgmac_set(bgmac, BCMA_CLKCTLST, BGMAC_BCMA_CLKCTLST_MISC_PLL_REQ); bgmac_wait_value(bgmac->core, BCMA_CLKCTLST, @@ -1188,8 +1200,7 @@ static void bgmac_enable(struct bgmac *bgmac) break; } - if (ci->id != BCMA_CHIP_ID_BCM4707 && - ci->id != BCMA_CHIP_ID_BCM53018) { + if (!bgmac_is_bcm4707_family(bgmac)) { rxq_ctl = bgmac_read(bgmac, BGMAC_RXQ_CTL); rxq_ctl &= ~BGMAC_RXQ_CTL_MDP_MASK; bp_clk = bcma_pmu_get_bus_clock(&bgmac->core->bus->drv_cc) / @@ -1467,14 +1478,12 @@ static int bgmac_fixed_phy_register(struct bgmac *bgmac) static int bgmac_mii_register(struct bgmac *bgmac) { - struct bcma_chipinfo *ci = &bgmac->core->bus->chipinfo; struct mii_bus *mii_bus; struct phy_device *phy_dev; char bus_id[MII_BUS_ID_SIZE + 3]; - int i, err = 0; + int err = 0; - if (ci->id == BCMA_CHIP_ID_BCM4707 || - ci->id == BCMA_CHIP_ID_BCM53018) + if (bgmac_is_bcm4707_family(bgmac)) return bgmac_fixed_phy_register(bgmac); mii_bus = mdiobus_alloc(); @@ -1490,18 +1499,10 @@ static int bgmac_mii_register(struct bgmac *bgmac) mii_bus->parent = &bgmac->core->dev; mii_bus->phy_mask = ~(1 << bgmac->phyaddr); - mii_bus->irq = kmalloc_array(PHY_MAX_ADDR, sizeof(int), GFP_KERNEL); - if (!mii_bus->irq) { - err = -ENOMEM; - goto err_free_bus; - } - for (i = 0; i < PHY_MAX_ADDR; i++) - mii_bus->irq[i] = PHY_POLL; - err = mdiobus_register(mii_bus); if (err) { bgmac_err(bgmac, "Registration of mii bus failed\n"); - goto err_free_irq; + goto err_free_bus; } bgmac->mii_bus = mii_bus; @@ -1522,8 +1523,6 @@ static int bgmac_mii_register(struct bgmac *bgmac) err_unregister_bus: mdiobus_unregister(mii_bus); -err_free_irq: - kfree(mii_bus->irq); err_free_bus: mdiobus_free(mii_bus); return err; @@ -1534,7 +1533,6 @@ static void bgmac_mii_unregister(struct bgmac *bgmac) struct mii_bus *mii_bus = bgmac->mii_bus; mdiobus_unregister(mii_bus); - kfree(mii_bus->irq); mdiobus_free(mii_bus); } @@ -1545,7 +1543,6 @@ static void bgmac_mii_unregister(struct bgmac *bgmac) /* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipattach */ static int bgmac_probe(struct bcma_device *core) { - struct bcma_chipinfo *ci = &core->bus->chipinfo; struct net_device *net_dev; struct bgmac *bgmac; struct ssb_sprom *sprom = &core->bus->sprom; @@ -1626,8 +1623,7 @@ static int bgmac_probe(struct bcma_device *core) bgmac_chip_reset(bgmac); /* For Northstar, we have to take all GMAC core out of reset */ - if (ci->id == BCMA_CHIP_ID_BCM4707 || - ci->id == BCMA_CHIP_ID_BCM53018) { + if (bgmac_is_bcm4707_family(bgmac)) { struct bcma_device *ns_core; int ns_gmac; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index cae0956186ce..7dd7490fdac1 
100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h @@ -1277,8 +1277,7 @@ enum sp_rtnl_flag { BNX2X_SP_RTNL_HYPERVISOR_VLAN, BNX2X_SP_RTNL_TX_STOP, BNX2X_SP_RTNL_GET_DRV_VERSION, - BNX2X_SP_RTNL_ADD_VXLAN_PORT, - BNX2X_SP_RTNL_DEL_VXLAN_PORT, + BNX2X_SP_RTNL_CHANGE_UDP_PORT, }; enum bnx2x_iov_flag { @@ -1327,6 +1326,17 @@ struct bnx2x_vlan_entry { bool hw; }; +enum bnx2x_udp_port_type { + BNX2X_UDP_PORT_VXLAN, + BNX2X_UDP_PORT_GENEVE, + BNX2X_UDP_PORT_MAX, +}; + +struct bnx2x_udp_tunnel { + u16 dst_port; + u8 count; +}; + struct bnx2x { /* Fields used in the tx and intr/napi performance paths * are grouped together in the beginning of the structure @@ -1830,9 +1840,10 @@ struct bnx2x { struct list_head vlan_reg; u16 vlan_cnt; u16 vlan_credit; - u16 vxlan_dst_port; - u8 vxlan_dst_port_count; bool accept_any_vlan; + + /* Vxlan/Geneve related information */ + struct bnx2x_udp_tunnel udp_tunnel_ports[BNX2X_UDP_PORT_MAX]; }; /* Tx queues may be less or equal to Rx queues */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 9695a4c4a434..45843d150868 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -4272,6 +4272,14 @@ int bnx2x_setup_tc(struct net_device *dev, u8 num_tc) return 0; } +int __bnx2x_setup_tc(struct net_device *dev, u32 handle, __be16 proto, + struct tc_to_netdev *tc) +{ + if (handle != TC_H_ROOT || tc->type != TC_SETUP_MQPRIO) + return -EINVAL; + return bnx2x_setup_tc(dev, tc->tc); +} + /* called with rtnl_lock */ int bnx2x_change_mac_addr(struct net_device *dev, void *p) { @@ -5086,4 +5094,3 @@ void bnx2x_schedule_sp_rtnl(struct bnx2x *bp, enum sp_rtnl_flag flag, flag); schedule_delayed_work(&bp->sp_rtnl_task, 0); } -EXPORT_SYMBOL(bnx2x_schedule_sp_rtnl); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h index 4cbb03f87b5a..0e68fadecfdb 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h @@ -486,6 +486,8 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev); /* setup_tc callback */ int bnx2x_setup_tc(struct net_device *dev, u8 num_tc); +int __bnx2x_setup_tc(struct net_device *dev, u32 handle, __be16 proto, + struct tc_to_netdev *tc); int bnx2x_get_vf_config(struct net_device *dev, int vf, struct ifla_vf_info *ivi); @@ -923,6 +925,7 @@ static inline int bnx2x_func_start(struct bnx2x *bp) struct bnx2x_func_state_params func_params = {NULL}; struct bnx2x_func_start_params *start_params = &func_params.params.start; + u16 port; /* Prepare parameters for function state transitions */ __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); @@ -959,8 +962,14 @@ static inline int bnx2x_func_start(struct bnx2x *bp) start_params->network_cos_mode = STATIC_COS; else /* CHIP_IS_E1X */ start_params->network_cos_mode = FW_WRR; - - start_params->vxlan_dst_port = bp->vxlan_dst_port; + if (bp->udp_tunnel_ports[BNX2X_UDP_PORT_VXLAN].count) { + port = bp->udp_tunnel_ports[BNX2X_UDP_PORT_VXLAN].dst_port; + start_params->vxlan_dst_port = port; + } + if (bp->udp_tunnel_ports[BNX2X_UDP_PORT_GENEVE].count) { + port = bp->udp_tunnel_ports[BNX2X_UDP_PORT_GENEVE].dst_port; + start_params->geneve_dst_port = port; + } start_params->inner_rss = 1; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c index 
7ccf6684e0a3..2c6ba046d2a8 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c @@ -195,6 +195,7 @@ static void bnx2x_dcbx_get_ap_feature(struct bnx2x *bp, u32 error) { u8 index; u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority; + u8 iscsi_pri_found = 0, fcoe_pri_found = 0; if (GET_FLAGS(error, DCBX_LOCAL_APP_ERROR)) DP(BNX2X_MSG_DCB, "DCBX_LOCAL_APP_ERROR\n"); @@ -210,29 +211,57 @@ static void bnx2x_dcbx_get_ap_feature(struct bnx2x *bp, bp->dcbx_port_params.app.enabled = true; + /* Use 0 as the default application priority for all. */ for (index = 0 ; index < LLFC_DRIVER_TRAFFIC_TYPE_MAX; index++) ttp[index] = 0; - if (app->default_pri < MAX_PFC_PRIORITIES) - ttp[LLFC_TRAFFIC_TYPE_NW] = app->default_pri; - for (index = 0 ; index < DCBX_MAX_APP_PROTOCOL; index++) { struct dcbx_app_priority_entry *entry = app->app_pri_tbl; + enum traffic_type type = MAX_TRAFFIC_TYPE; if (GET_FLAGS(entry[index].appBitfield, - DCBX_APP_SF_ETH_TYPE) && - ETH_TYPE_FCOE == entry[index].app_id) - bnx2x_dcbx_get_ap_priority(bp, - entry[index].pri_bitmap, - LLFC_TRAFFIC_TYPE_FCOE); + DCBX_APP_SF_DEFAULT) && + GET_FLAGS(entry[index].appBitfield, + DCBX_APP_SF_ETH_TYPE)) { + type = LLFC_TRAFFIC_TYPE_NW; + } else if (GET_FLAGS(entry[index].appBitfield, + DCBX_APP_SF_PORT) && + TCP_PORT_ISCSI == entry[index].app_id) { + type = LLFC_TRAFFIC_TYPE_ISCSI; + iscsi_pri_found = 1; + } else if (GET_FLAGS(entry[index].appBitfield, + DCBX_APP_SF_ETH_TYPE) && + ETH_TYPE_FCOE == entry[index].app_id) { + type = LLFC_TRAFFIC_TYPE_FCOE; + fcoe_pri_found = 1; + } - if (GET_FLAGS(entry[index].appBitfield, - DCBX_APP_SF_PORT) && - TCP_PORT_ISCSI == entry[index].app_id) - bnx2x_dcbx_get_ap_priority(bp, - entry[index].pri_bitmap, - LLFC_TRAFFIC_TYPE_ISCSI); + if (type == MAX_TRAFFIC_TYPE) + continue; + + bnx2x_dcbx_get_ap_priority(bp, + entry[index].pri_bitmap, + type); + } + + /* If we have received a non-zero default application + * priority, then use that for applications which are + * not configured with any priority. + */ + if (ttp[LLFC_TRAFFIC_TYPE_NW] != 0) { + if (!iscsi_pri_found) { + ttp[LLFC_TRAFFIC_TYPE_ISCSI] = + ttp[LLFC_TRAFFIC_TYPE_NW]; + DP(BNX2X_MSG_DCB, + "ISCSI is using default priority.\n"); + } + if (!fcoe_pri_found) { + ttp[LLFC_TRAFFIC_TYPE_FCOE] = + ttp[LLFC_TRAFFIC_TYPE_NW]; + DP(BNX2X_MSG_DCB, + "FCoE is using default priority.\n"); + } } } else { DP(BNX2X_MSG_DCB, "DCBX_LOCAL_APP_DISABLED\n"); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c index 820b7e04bb5f..85a7800bfc12 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c @@ -981,6 +981,11 @@ static void bnx2x_get_regs(struct net_device *dev, memcpy(p, &dump_hdr, sizeof(struct dump_header)); p += dump_hdr.header_size + 1; + /* This isn't really an error, but since attention handling is going + * to print the GRC timeouts using this macro, we use the same. + */ + BNX2X_ERR("Generating register dump. 
Might trigger harmless GRC timeouts\n"); + /* Actually read the registers */ __bnx2x_get_regs(bp, p); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h index 27aa0802d87d..04523d41b873 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h @@ -1824,17 +1824,22 @@ struct dcbx_app_priority_entry { u8 pri_bitmap; u8 appBitfield; #define DCBX_APP_ENTRY_VALID 0x01 - #define DCBX_APP_ENTRY_SF_MASK 0x30 + #define DCBX_APP_ENTRY_SF_MASK 0xF0 #define DCBX_APP_ENTRY_SF_SHIFT 4 #define DCBX_APP_SF_ETH_TYPE 0x10 #define DCBX_APP_SF_PORT 0x20 + #define DCBX_APP_SF_UDP 0x40 + #define DCBX_APP_SF_DEFAULT 0x80 #elif defined(__LITTLE_ENDIAN) u8 appBitfield; #define DCBX_APP_ENTRY_VALID 0x01 - #define DCBX_APP_ENTRY_SF_MASK 0x30 + #define DCBX_APP_ENTRY_SF_MASK 0xF0 #define DCBX_APP_ENTRY_SF_SHIFT 4 + #define DCBX_APP_ENTRY_VALID 0x01 #define DCBX_APP_SF_ETH_TYPE 0x10 #define DCBX_APP_SF_PORT 0x20 + #define DCBX_APP_SF_UDP 0x40 + #define DCBX_APP_SF_DEFAULT 0x80 u8 pri_bitmap; u16 app_id; #endif diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 6c4e3a69976f..81fc51c4ec2b 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -59,7 +59,9 @@ #include <linux/semaphore.h> #include <linux/stringify.h> #include <linux/vmalloc.h> - +#if IS_ENABLED(CONFIG_GENEVE) +#include <net/geneve.h> +#endif #include "bnx2x.h" #include "bnx2x_init.h" #include "bnx2x_init_ops.h" @@ -10076,11 +10078,13 @@ static void bnx2x_parity_recover(struct bnx2x *bp) } } -#ifdef CONFIG_BNX2X_VXLAN -static int bnx2x_vxlan_port_update(struct bnx2x *bp, u16 port) +#if defined(CONFIG_BNX2X_VXLAN) || IS_ENABLED(CONFIG_GENEVE) +static int bnx2x_udp_port_update(struct bnx2x *bp) { struct bnx2x_func_switch_update_params *switch_update_params; struct bnx2x_func_state_params func_params = {NULL}; + struct bnx2x_udp_tunnel *udp_tunnel; + u16 vxlan_port = 0, geneve_port = 0; int rc; switch_update_params = &func_params.params.switch_update; @@ -10095,69 +10099,125 @@ static int bnx2x_vxlan_port_update(struct bnx2x *bp, u16 port) /* Function parameters */ __set_bit(BNX2X_F_UPDATE_TUNNEL_CFG_CHNG, &switch_update_params->changes); - switch_update_params->vxlan_dst_port = port; + + if (bp->udp_tunnel_ports[BNX2X_UDP_PORT_GENEVE].count) { + udp_tunnel = &bp->udp_tunnel_ports[BNX2X_UDP_PORT_GENEVE]; + geneve_port = udp_tunnel->dst_port; + switch_update_params->geneve_dst_port = geneve_port; + } + + if (bp->udp_tunnel_ports[BNX2X_UDP_PORT_VXLAN].count) { + udp_tunnel = &bp->udp_tunnel_ports[BNX2X_UDP_PORT_VXLAN]; + vxlan_port = udp_tunnel->dst_port; + switch_update_params->vxlan_dst_port = vxlan_port; + } + + /* Re-enable inner-rss for the offloaded UDP tunnels */ + __set_bit(BNX2X_F_UPDATE_TUNNEL_INNER_RSS, + &switch_update_params->changes); + rc = bnx2x_func_state_change(bp, &func_params); if (rc) - BNX2X_ERR("failed to change vxlan dst port to %d (rc = 0x%x)\n", - port, rc); + BNX2X_ERR("failed to set UDP dst port to %04x %04x (rc = 0x%x)\n", + vxlan_port, geneve_port, rc); + else + DP(BNX2X_MSG_SP, + "Configured UDP ports: Vxlan [%04x] Geneve [%04x]\n", + vxlan_port, geneve_port); + return rc; } -static void __bnx2x_add_vxlan_port(struct bnx2x *bp, u16 port) +static void __bnx2x_add_udp_port(struct bnx2x *bp, u16 port, + enum bnx2x_udp_port_type type) { - if (!netif_running(bp->dev)) + struct 
bnx2x_udp_tunnel *udp_port = &bp->udp_tunnel_ports[type]; + + if (!netif_running(bp->dev) || !IS_PF(bp)) + return; + + if (udp_port->count && udp_port->dst_port == port) { + udp_port->count++; return; + } - if (bp->vxlan_dst_port_count && bp->vxlan_dst_port == port) { - bp->vxlan_dst_port_count++; + if (udp_port->count) { + DP(BNX2X_MSG_SP, + "UDP tunnel [%d] - destination port limit reached\n", + type); return; } - if (bp->vxlan_dst_port_count || !IS_PF(bp)) { - DP(BNX2X_MSG_SP, "Vxlan destination port limit reached\n"); + udp_port->dst_port = port; + udp_port->count = 1; + bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_CHANGE_UDP_PORT, 0); +} + +static void __bnx2x_del_udp_port(struct bnx2x *bp, u16 port, + enum bnx2x_udp_port_type type) +{ + struct bnx2x_udp_tunnel *udp_port = &bp->udp_tunnel_ports[type]; + + if (!IS_PF(bp)) + return; + + if (!udp_port->count || udp_port->dst_port != port) { + DP(BNX2X_MSG_SP, "Invalid UDP tunnel [%d] port\n", + type); return; } - bp->vxlan_dst_port = port; - bp->vxlan_dst_port_count = 1; - bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_ADD_VXLAN_PORT, 0); + /* Remove reference, and make certain it's no longer in use */ + udp_port->count--; + if (udp_port->count) + return; + udp_port->dst_port = 0; + + if (netif_running(bp->dev)) + bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_CHANGE_UDP_PORT, 0); + else + DP(BNX2X_MSG_SP, "Deleted UDP tunnel [%d] port %d\n", + type, port); } +#endif +#ifdef CONFIG_BNX2X_VXLAN static void bnx2x_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family, __be16 port) { struct bnx2x *bp = netdev_priv(netdev); u16 t_port = ntohs(port); - __bnx2x_add_vxlan_port(bp, t_port); + __bnx2x_add_udp_port(bp, t_port, BNX2X_UDP_PORT_VXLAN); } -static void __bnx2x_del_vxlan_port(struct bnx2x *bp, u16 port) +static void bnx2x_del_vxlan_port(struct net_device *netdev, + sa_family_t sa_family, __be16 port) { - if (!bp->vxlan_dst_port_count || bp->vxlan_dst_port != port || - !IS_PF(bp)) { - DP(BNX2X_MSG_SP, "Invalid vxlan port\n"); - return; - } - bp->vxlan_dst_port_count--; - if (bp->vxlan_dst_port_count) - return; + struct bnx2x *bp = netdev_priv(netdev); + u16 t_port = ntohs(port); - if (netif_running(bp->dev)) { - bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_DEL_VXLAN_PORT, 0); - } else { - bp->vxlan_dst_port = 0; - netdev_info(bp->dev, "Deleted vxlan dest port %d", port); - } + __bnx2x_del_udp_port(bp, t_port, BNX2X_UDP_PORT_VXLAN); +} +#endif + +#if IS_ENABLED(CONFIG_GENEVE) +static void bnx2x_add_geneve_port(struct net_device *netdev, + sa_family_t sa_family, __be16 port) +{ + struct bnx2x *bp = netdev_priv(netdev); + u16 t_port = ntohs(port); + + __bnx2x_add_udp_port(bp, t_port, BNX2X_UDP_PORT_GENEVE); } -static void bnx2x_del_vxlan_port(struct net_device *netdev, - sa_family_t sa_family, __be16 port) +static void bnx2x_del_geneve_port(struct net_device *netdev, + sa_family_t sa_family, __be16 port) { struct bnx2x *bp = netdev_priv(netdev); u16 t_port = ntohs(port); - __bnx2x_del_vxlan_port(bp, t_port); + __bnx2x_del_udp_port(bp, t_port, BNX2X_UDP_PORT_GENEVE); } #endif @@ -10169,9 +10229,6 @@ static int bnx2x_close(struct net_device *dev); static void bnx2x_sp_rtnl_task(struct work_struct *work) { struct bnx2x *bp = container_of(work, struct bnx2x, sp_rtnl_task.work); -#ifdef CONFIG_BNX2X_VXLAN - u16 port; -#endif rtnl_lock(); @@ -10270,23 +10327,27 @@ sp_rtnl_not_reset: &bp->sp_rtnl_state)) bnx2x_update_mng_version(bp); -#ifdef CONFIG_BNX2X_VXLAN - port = bp->vxlan_dst_port; - if (test_and_clear_bit(BNX2X_SP_RTNL_ADD_VXLAN_PORT, - 
&bp->sp_rtnl_state)) { - if (!bnx2x_vxlan_port_update(bp, port)) - netdev_info(bp->dev, "Added vxlan dest port %d", port); - else - bp->vxlan_dst_port = 0; - } - - if (test_and_clear_bit(BNX2X_SP_RTNL_DEL_VXLAN_PORT, +#if defined(CONFIG_BNX2X_VXLAN) || IS_ENABLED(CONFIG_GENEVE) + if (test_and_clear_bit(BNX2X_SP_RTNL_CHANGE_UDP_PORT, &bp->sp_rtnl_state)) { - if (!bnx2x_vxlan_port_update(bp, 0)) { - netdev_info(bp->dev, - "Deleted vxlan dest port %d", port); - bp->vxlan_dst_port = 0; - vxlan_get_rx_port(bp->dev); + if (bnx2x_udp_port_update(bp)) { + /* On error, forget configuration */ + memset(bp->udp_tunnel_ports, 0, + sizeof(struct bnx2x_udp_tunnel) * + BNX2X_UDP_PORT_MAX); + } else { + /* Since we don't store additional port information, + * if no port is configured for any feature ask for + * information about currently configured ports. + */ +#ifdef CONFIG_BNX2X_VXLAN + if (!bp->udp_tunnel_ports[BNX2X_UDP_PORT_VXLAN].count) + vxlan_get_rx_port(bp->dev); +#endif +#if IS_ENABLED(CONFIG_GENEVE) + if (!bp->udp_tunnel_ports[BNX2X_UDP_PORT_GENEVE].count) + geneve_get_rx_port(bp->dev); +#endif } } #endif @@ -12368,8 +12429,10 @@ static int bnx2x_init_bp(struct bnx2x *bp) if (SHMEM2_HAS(bp, dcbx_lldp_params_offset) && SHMEM2_HAS(bp, dcbx_lldp_dcbx_stat_offset) && + SHMEM2_HAS(bp, dcbx_en) && SHMEM2_RD(bp, dcbx_lldp_params_offset) && - SHMEM2_RD(bp, dcbx_lldp_dcbx_stat_offset)) { + SHMEM2_RD(bp, dcbx_lldp_dcbx_stat_offset) && + SHMEM2_RD(bp, dcbx_en[BP_PORT(bp)])) { bnx2x_dcbx_set_state(bp, true, BNX2X_DCBX_ENABLED_ON_NEG_ON); bnx2x_dcbx_init_params(bp); } else { @@ -12494,6 +12557,10 @@ static int bnx2x_open(struct net_device *dev) if (IS_PF(bp)) vxlan_get_rx_port(dev); #endif +#if IS_ENABLED(CONFIG_GENEVE) + if (IS_PF(bp)) + geneve_get_rx_port(dev); +#endif return 0; } @@ -12994,7 +13061,7 @@ static const struct net_device_ops bnx2x_netdev_ops = { #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = poll_bnx2x, #endif - .ndo_setup_tc = bnx2x_setup_tc, + .ndo_setup_tc = __bnx2x_setup_tc, #ifdef CONFIG_BNX2X_SRIOV .ndo_set_vf_mac = bnx2x_set_vf_mac, .ndo_set_vf_vlan = bnx2x_set_vf_vlan, @@ -13011,6 +13078,10 @@ static const struct net_device_ops bnx2x_netdev_ops = { .ndo_add_vxlan_port = bnx2x_add_vxlan_port, .ndo_del_vxlan_port = bnx2x_del_vxlan_port, #endif +#if IS_ENABLED(CONFIG_GENEVE) + .ndo_add_geneve_port = bnx2x_add_geneve_port, + .ndo_del_geneve_port = bnx2x_del_geneve_port, +#endif }; static int bnx2x_set_coherency_mask(struct bnx2x *bp) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index f9569493034b..169920aa39f3 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -1490,10 +1490,11 @@ static void bnxt_free_tx_skbs(struct bnxt *bp) last = tx_buf->nr_frags; j += 2; - for (k = 0; k < last; k++, j = NEXT_TX(j)) { + for (k = 0; k < last; k++, j++) { + int ring_idx = j & bp->tx_ring_mask; skb_frag_t *frag = &skb_shinfo(skb)->frags[k]; - tx_buf = &txr->tx_buf_ring[j]; + tx_buf = &txr->tx_buf_ring[ring_idx]; dma_unmap_page( &pdev->dev, dma_unmap_addr(tx_buf, mapping), @@ -2614,6 +2615,9 @@ int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout) /* Write request msg to hwrm channel */ __iowrite32_copy(bp->bar0, data, msg_len / 4); + for (i = msg_len; i < HWRM_MAX_REQ_LEN; i += 4) + writel(0, bp->bar0 + i); + /* currently supports only one outstanding message */ if (intr_process) bp->hwrm_intr_seq_id = le32_to_cpu(req->target_id_seq_id) & @@ -3403,7 +3407,7 @@ 
static int hwrm_ring_free_send_msg(struct bnxt *bp, struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr; u16 error_code; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, -1, -1); + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, cmpl_ring_id, -1); req.ring_type = ring_type; req.ring_id = cpu_to_le16(ring->fw_ring_id); @@ -4816,8 +4820,6 @@ bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) stats->multicast += le64_to_cpu(hw_stats->rx_mcast_pkts); - stats->rx_dropped += le64_to_cpu(hw_stats->rx_drop_pkts); - stats->tx_dropped += le64_to_cpu(hw_stats->tx_drop_pkts); } @@ -5368,9 +5370,16 @@ static int bnxt_change_mtu(struct net_device *dev, int new_mtu) return 0; } -static int bnxt_setup_tc(struct net_device *dev, u8 tc) +static int bnxt_setup_tc(struct net_device *dev, u32 handle, __be16 proto, + struct tc_to_netdev *ntc) { struct bnxt *bp = netdev_priv(dev); + u8 tc; + + if (handle != TC_H_ROOT || ntc->type != TC_SETUP_MQPRIO) + return -EINVAL; + + tc = ntc->tc; if (tc > bp->max_tc) { netdev_err(dev, "too many traffic classes requested: %d Max supported is %d\n", @@ -5708,21 +5717,23 @@ static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, { int max_ring_grps = 0; - if (BNXT_PF(bp)) { - *max_tx = bp->pf.max_tx_rings; - *max_rx = bp->pf.max_rx_rings; - *max_cp = min_t(int, bp->pf.max_irqs, bp->pf.max_cp_rings); - *max_cp = min_t(int, *max_cp, bp->pf.max_stat_ctxs); - max_ring_grps = bp->pf.max_hw_ring_grps; - } else { #ifdef CONFIG_BNXT_SRIOV + if (!BNXT_PF(bp)) { *max_tx = bp->vf.max_tx_rings; *max_rx = bp->vf.max_rx_rings; *max_cp = min_t(int, bp->vf.max_irqs, bp->vf.max_cp_rings); *max_cp = min_t(int, *max_cp, bp->vf.max_stat_ctxs); max_ring_grps = bp->vf.max_hw_ring_grps; + } else #endif + { + *max_tx = bp->pf.max_tx_rings; + *max_rx = bp->pf.max_rx_rings; + *max_cp = min_t(int, bp->pf.max_irqs, bp->pf.max_cp_rings); + *max_cp = min_t(int, *max_cp, bp->pf.max_stat_ctxs); + max_ring_grps = bp->pf.max_hw_ring_grps; } + if (bp->flags & BNXT_FLAG_AGG_RINGS) *max_rx >>= 1; *max_rx = min_t(int, *max_rx, max_ring_grps); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 4f62c9fa96d5..922b898e7a32 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -837,6 +837,45 @@ static int bnxt_flash_nvram(struct net_device *dev, return rc; } +static int bnxt_firmware_reset(struct net_device *dev, + u16 dir_type) +{ + struct bnxt *bp = netdev_priv(dev); + struct hwrm_fw_reset_input req = {0}; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_RESET, -1, -1); + + /* TODO: Support ASAP ChiMP self-reset (e.g. upon PF driver unload) */ + /* TODO: Address self-reset of APE/KONG/BONO/TANG or ungraceful reset */ + /* (e.g. 
when firmware isn't already running) */ + switch (dir_type) { + case BNX_DIR_TYPE_CHIMP_PATCH: + case BNX_DIR_TYPE_BOOTCODE: + case BNX_DIR_TYPE_BOOTCODE_2: + req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_BOOT; + /* Self-reset ChiMP upon next PCIe reset: */ + req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST; + break; + case BNX_DIR_TYPE_APE_FW: + case BNX_DIR_TYPE_APE_PATCH: + req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT; + break; + case BNX_DIR_TYPE_KONG_FW: + case BNX_DIR_TYPE_KONG_PATCH: + req.embedded_proc_type = + FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL; + break; + case BNX_DIR_TYPE_BONO_FW: + case BNX_DIR_TYPE_BONO_PATCH: + req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE; + break; + default: + return -EINVAL; + } + + return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); +} + static int bnxt_flash_firmware(struct net_device *dev, u16 dir_type, const u8 *fw_data, @@ -894,10 +933,9 @@ static int bnxt_flash_firmware(struct net_device *dev, /* TODO: Validate digital signature (RSA-encrypted SHA-256 hash) here */ rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST, 0, 0, fw_data, fw_size); - if (rc == 0) { /* Firmware update successful */ - /* TODO: Notify processor it needs to reset itself - */ - } + if (rc == 0) /* Firmware update successful */ + rc = bnxt_firmware_reset(dev, dir_type); + return rc; } diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c index 8bdfe53754ba..457c3bc8cfff 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmmii.c +++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c @@ -401,9 +401,7 @@ int bcmgenet_mii_probe(struct net_device *dev) * Ethernet MAC ISRs */ if (priv->internal_phy) - priv->mii_bus->irq[phydev->addr] = PHY_IGNORE_INTERRUPT; - else - priv->mii_bus->irq[phydev->addr] = PHY_POLL; + priv->phydev->irq = PHY_IGNORE_INTERRUPT; return 0; } @@ -477,12 +475,6 @@ static int bcmgenet_mii_alloc(struct bcmgenet_priv *priv) snprintf(bus->id, MII_BUS_ID_SIZE, "%s-%d", priv->pdev->name, priv->pdev->id); - bus->irq = kcalloc(PHY_MAX_ADDR, sizeof(int), GFP_KERNEL); - if (!bus->irq) { - mdiobus_free(priv->mii_bus); - return -ENOMEM; - } - return 0; } @@ -581,7 +573,7 @@ static int bcmgenet_mii_pd_init(struct bcmgenet_priv *priv) } if (pd->phy_address >= 0 && pd->phy_address < PHY_MAX_ADDR) - phydev = mdio->phy_map[pd->phy_address]; + phydev = mdiobus_get_phy(mdio, pd->phy_address); else phydev = phy_find_first(mdio); @@ -648,7 +640,6 @@ int bcmgenet_mii_init(struct net_device *dev) out: of_node_put(priv->phy_dn); mdiobus_unregister(priv->mii_bus); - kfree(priv->mii_bus->irq); mdiobus_free(priv->mii_bus); return ret; } @@ -659,6 +650,5 @@ void bcmgenet_mii_exit(struct net_device *dev) of_node_put(priv->phy_dn); mdiobus_unregister(priv->mii_bus); - kfree(priv->mii_bus->irq); mdiobus_free(priv->mii_bus); } diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c index f557a2aaec23..eacc559679bf 100644 --- a/drivers/net/ethernet/broadcom/sb1250-mac.c +++ b/drivers/net/ethernet/broadcom/sb1250-mac.c @@ -238,7 +238,6 @@ struct sbmac_softc { struct napi_struct napi; struct phy_device *phy_dev; /* the associated PHY device */ struct mii_bus *mii_bus; /* the MII bus */ - int phy_irq[PHY_MAX_ADDR]; spinlock_t sbm_lock; /* spin lock */ int sbm_devflags; /* current device flags */ @@ -2250,9 +2249,6 @@ static int sbmac_init(struct platform_device *pldev, long long base) sc->mii_bus->priv = sc; sc->mii_bus->read 
= sbmac_mii_read; sc->mii_bus->write = sbmac_mii_write; - sc->mii_bus->irq = sc->phy_irq; - for (i = 0; i < PHY_MAX_ADDR; ++i) - sc->mii_bus->irq[i] = SBMAC_PHY_INT; sc->mii_bus->parent = &pldev->dev; /* @@ -2358,20 +2354,15 @@ static int sbmac_mii_probe(struct net_device *dev) { struct sbmac_softc *sc = netdev_priv(dev); struct phy_device *phy_dev; - int i; - for (i = 0; i < PHY_MAX_ADDR; i++) { - phy_dev = sc->mii_bus->phy_map[i]; - if (phy_dev) - break; - } + phy_dev = phy_find_first(sc->mii_bus); if (!phy_dev) { printk(KERN_ERR "%s: no PHY found\n", dev->name); return -ENXIO; } - phy_dev = phy_connect(dev, dev_name(&phy_dev->dev), &sbmac_mii_poll, - PHY_INTERFACE_MODE_GMII); + phy_dev = phy_connect(dev, dev_name(&phy_dev->mdio.dev), + &sbmac_mii_poll, PHY_INTERFACE_MODE_GMII); if (IS_ERR(phy_dev)) { printk(KERN_ERR "%s: could not attach to PHY\n", dev->name); return PTR_ERR(phy_dev); @@ -2388,11 +2379,10 @@ static int sbmac_mii_probe(struct net_device *dev) SUPPORTED_MII | SUPPORTED_Pause | SUPPORTED_Asym_Pause; - phy_dev->advertising = phy_dev->supported; - pr_info("%s: attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n", - dev->name, phy_dev->drv->name, - dev_name(&phy_dev->dev), phy_dev->irq); + phy_attached_info(phy_dev); + + phy_dev->advertising = phy_dev->supported; sc->phy_dev = phy_dev; diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 79789d8e52da..49eea8981332 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -1406,7 +1406,7 @@ static void tg3_mdio_config_5785(struct tg3 *tp) u32 val; struct phy_device *phydev; - phydev = tp->mdio_bus->phy_map[tp->phy_addr]; + phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) { case PHY_ID_BCM50610: case PHY_ID_BCM50610M: @@ -1538,10 +1538,6 @@ static int tg3_mdio_init(struct tg3 *tp) tp->mdio_bus->read = &tg3_mdio_read; tp->mdio_bus->write = &tg3_mdio_write; tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr); - tp->mdio_bus->irq = &tp->mdio_irq[0]; - - for (i = 0; i < PHY_MAX_ADDR; i++) - tp->mdio_bus->irq[i] = PHY_POLL; /* The bus registration will look for all the PHYs on the mdio bus. * Unfortunately, it does not ensure the PHY is powered up before @@ -1558,7 +1554,7 @@ static int tg3_mdio_init(struct tg3 *tp) return i; } - phydev = tp->mdio_bus->phy_map[tp->phy_addr]; + phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); if (!phydev || !phydev->drv) { dev_warn(&tp->pdev->dev, "No PHY devices\n"); @@ -1968,7 +1964,7 @@ static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv) u32 old_tx_mode = tp->tx_mode; if (tg3_flag(tp, USE_PHYLIB)) - autoneg = tp->mdio_bus->phy_map[tp->phy_addr]->autoneg; + autoneg = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)->autoneg; else autoneg = tp->link_config.autoneg; @@ -2004,7 +2000,7 @@ static void tg3_adjust_link(struct net_device *dev) u8 oldflowctrl, linkmesg = 0; u32 mac_mode, lcl_adv, rmt_adv; struct tg3 *tp = netdev_priv(dev); - struct phy_device *phydev = tp->mdio_bus->phy_map[tp->phy_addr]; + struct phy_device *phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); spin_lock_bh(&tp->lock); @@ -2093,10 +2089,10 @@ static int tg3_phy_init(struct tg3 *tp) /* Bring the PHY back to a known state. */ tg3_bmcr_reset(tp); - phydev = tp->mdio_bus->phy_map[tp->phy_addr]; + phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); /* Attach the MAC to the PHY. 
*/ - phydev = phy_connect(tp->dev, dev_name(&phydev->dev), + phydev = phy_connect(tp->dev, phydev_name(phydev), tg3_adjust_link, phydev->interface); if (IS_ERR(phydev)) { dev_err(&tp->pdev->dev, "Could not attach to PHY\n"); @@ -2120,7 +2116,7 @@ static int tg3_phy_init(struct tg3 *tp) SUPPORTED_Asym_Pause); break; default: - phy_disconnect(tp->mdio_bus->phy_map[tp->phy_addr]); + phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)); return -EINVAL; } @@ -2128,6 +2124,8 @@ static int tg3_phy_init(struct tg3 *tp) phydev->advertising = phydev->supported; + phy_attached_info(phydev); + return 0; } @@ -2138,7 +2136,7 @@ static void tg3_phy_start(struct tg3 *tp) if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) return; - phydev = tp->mdio_bus->phy_map[tp->phy_addr]; + phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) { tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER; @@ -2158,13 +2156,13 @@ static void tg3_phy_stop(struct tg3 *tp) if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) return; - phy_stop(tp->mdio_bus->phy_map[tp->phy_addr]); + phy_stop(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)); } static void tg3_phy_fini(struct tg3 *tp) { if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) { - phy_disconnect(tp->mdio_bus->phy_map[tp->phy_addr]); + phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)); tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED; } } @@ -4048,7 +4046,7 @@ static int tg3_power_down_prepare(struct tg3 *tp) struct phy_device *phydev; u32 phyid, advertising; - phydev = tp->mdio_bus->phy_map[tp->phy_addr]; + phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER; @@ -12018,7 +12016,7 @@ static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, int ret; u32 offset, len, b_offset, odd_len; u8 *buf; - __be32 start, end; + __be32 start = 0, end; if (tg3_flag(tp, NO_NVRAM) || eeprom->magic != TG3_EEPROM_MAGIC) @@ -12076,7 +12074,7 @@ static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) struct phy_device *phydev; if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) return -EAGAIN; - phydev = tp->mdio_bus->phy_map[tp->phy_addr]; + phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); return phy_ethtool_gset(phydev, cmd); } @@ -12143,7 +12141,7 @@ static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) struct phy_device *phydev; if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) return -EAGAIN; - phydev = tp->mdio_bus->phy_map[tp->phy_addr]; + phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); return phy_ethtool_sset(phydev, cmd); } @@ -12298,7 +12296,7 @@ static int tg3_nway_reset(struct net_device *dev) if (tg3_flag(tp, USE_PHYLIB)) { if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) return -EAGAIN; - r = phy_start_aneg(tp->mdio_bus->phy_map[tp->phy_addr]); + r = phy_start_aneg(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)); } else { u32 bmcr; @@ -12416,7 +12414,7 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam u32 newadv; struct phy_device *phydev; - phydev = tp->mdio_bus->phy_map[tp->phy_addr]; + phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); if (!(phydev->supported & SUPPORTED_Pause) || (!(phydev->supported & SUPPORTED_Asym_Pause) && @@ -13926,7 +13924,7 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) struct phy_device *phydev; if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) return -EAGAIN; - phydev = tp->mdio_bus->phy_map[tp->phy_addr]; + phydev = mdiobus_get_phy(tp->mdio_bus, 
tp->phy_addr); return phy_mii_ioctl(phydev, ifr, cmd); } @@ -17898,13 +17896,7 @@ static int tg3_init_one(struct pci_dev *pdev, tg3_bus_string(tp, str), dev->dev_addr); - if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) { - struct phy_device *phydev; - phydev = tp->mdio_bus->phy_map[tp->phy_addr]; - netdev_info(dev, - "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n", - phydev->drv->name, dev_name(&phydev->dev)); - } else { + if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) { char *ethtype; if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY) diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h index 31c9f8295953..3b5e98ecba00 100644 --- a/drivers/net/ethernet/broadcom/tg3.h +++ b/drivers/net/ethernet/broadcom/tg3.h @@ -3254,7 +3254,6 @@ struct tg3 { int pcie_readrq; struct mii_bus *mdio_bus; - int mdio_irq[PHY_MAX_ADDR]; int old_link; u8 phy_addr; diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c index 21a0cfc3e7ec..771cc267f217 100644 --- a/drivers/net/ethernet/brocade/bna/bnad.c +++ b/drivers/net/ethernet/brocade/bna/bnad.c @@ -542,39 +542,50 @@ bnad_cq_drop_packet(struct bnad *bnad, struct bna_rcb *rcb, } static void -bnad_cq_setup_skb_frags(struct bna_rcb *rcb, struct sk_buff *skb, - u32 sop_ci, u32 nvecs, u32 last_fraglen) +bnad_cq_setup_skb_frags(struct bna_ccb *ccb, struct sk_buff *skb, u32 nvecs) { + struct bna_rcb *rcb; struct bnad *bnad; - u32 ci, vec, len, totlen = 0; struct bnad_rx_unmap_q *unmap_q; - struct bnad_rx_unmap *unmap; + struct bna_cq_entry *cq, *cmpl; + u32 ci, pi, totlen = 0; + + cq = ccb->sw_q; + pi = ccb->producer_index; + cmpl = &cq[pi]; + rcb = bna_is_small_rxq(cmpl->rxq_id) ? ccb->rcb[1] : ccb->rcb[0]; unmap_q = rcb->unmap_q; bnad = rcb->bnad; + ci = rcb->consumer_index; /* prefetch header */ - prefetch(page_address(unmap_q->unmap[sop_ci].page) + - unmap_q->unmap[sop_ci].page_offset); + prefetch(page_address(unmap_q->unmap[ci].page) + + unmap_q->unmap[ci].page_offset); + + while (nvecs--) { + struct bnad_rx_unmap *unmap; + u32 len; - for (vec = 1, ci = sop_ci; vec <= nvecs; vec++) { unmap = &unmap_q->unmap[ci]; BNA_QE_INDX_INC(ci, rcb->q_depth); dma_unmap_page(&bnad->pcidev->dev, - dma_unmap_addr(&unmap->vector, dma_addr), - unmap->vector.len, DMA_FROM_DEVICE); + dma_unmap_addr(&unmap->vector, dma_addr), + unmap->vector.len, DMA_FROM_DEVICE); - len = (vec == nvecs) ? - last_fraglen : unmap->vector.len; + len = ntohs(cmpl->length); skb->truesize += unmap->vector.len; totlen += len; skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, - unmap->page, unmap->page_offset, len); + unmap->page, unmap->page_offset, len); unmap->page = NULL; unmap->vector.len = 0; + + BNA_QE_INDX_INC(pi, ccb->q_depth); + cmpl = &cq[pi]; } skb->len += totlen; @@ -704,7 +715,7 @@ bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget) if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type)) bnad_cq_setup_skb(bnad, skb, unmap, len); else - bnad_cq_setup_skb_frags(rcb, skb, sop_ci, nvecs, len); + bnad_cq_setup_skb_frags(ccb, skb, nvecs); rcb->rxq->rx_packets++; rcb->rxq->rx_bytes += totlen; diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c index 8b45bc9ac29e..69af049e55a8 100644 --- a/drivers/net/ethernet/cadence/macb.c +++ b/drivers/net/ethernet/cadence/macb.c @@ -58,6 +58,9 @@ #define GEM_MTU_MIN_SIZE 68 +#define MACB_WOL_HAS_MAGIC_PACKET (0x1 << 0) +#define MACB_WOL_ENABLED (0x1 << 1) + /* * Graceful stop timeouts in us. 
We should allow up to * 1 frame time (10 Mbits/s, full-duplex, ignoring collisions) @@ -441,12 +444,6 @@ static int macb_mii_init(struct macb *bp) bp->mii_bus->parent = &bp->dev->dev; pdata = dev_get_platdata(&bp->pdev->dev); - bp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL); - if (!bp->mii_bus->irq) { - err = -ENOMEM; - goto err_out_free_mdiobus; - } - dev_set_drvdata(&bp->dev->dev, bp->mii_bus); np = bp->pdev->dev.of_node; @@ -471,9 +468,6 @@ static int macb_mii_init(struct macb *bp) goto err_out_unregister_bus; } } else { - for (i = 0; i < PHY_MAX_ADDR; i++) - bp->mii_bus->irq[i] = PHY_POLL; - if (pdata) bp->mii_bus->phy_mask = pdata->phy_mask; @@ -481,7 +475,7 @@ static int macb_mii_init(struct macb *bp) } if (err) - goto err_out_free_mdio_irq; + goto err_out_free_mdiobus; err = macb_mii_probe(bp->dev); if (err) @@ -491,8 +485,6 @@ static int macb_mii_init(struct macb *bp) err_out_unregister_bus: mdiobus_unregister(bp->mii_bus); -err_out_free_mdio_irq: - kfree(bp->mii_bus->irq); err_out_free_mdiobus: mdiobus_free(bp->mii_bus); err_out: @@ -1051,6 +1043,8 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id) /* close possible race with dev_close */ if (unlikely(!netif_running(dev))) { queue_writel(queue, IDR, -1); + if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) + queue_writel(queue, ISR, -1); break; } @@ -1572,6 +1566,8 @@ static void macb_reset_hw(struct macb *bp) for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { queue_writel(queue, IDR, -1); queue_readl(queue, ISR); + if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) + queue_writel(queue, ISR, -1); } } @@ -2124,12 +2120,46 @@ static void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs, regs_buff[10] = macb_tx_dma(&bp->queues[0], tail); regs_buff[11] = macb_tx_dma(&bp->queues[0], head); - regs_buff[12] = macb_or_gem_readl(bp, USRIO); + if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) + regs_buff[12] = macb_or_gem_readl(bp, USRIO); if (macb_is_gem(bp)) { regs_buff[13] = gem_readl(bp, DMACFG); } } +static void macb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +{ + struct macb *bp = netdev_priv(netdev); + + wol->supported = 0; + wol->wolopts = 0; + + if (bp->wol & MACB_WOL_HAS_MAGIC_PACKET) { + wol->supported = WAKE_MAGIC; + + if (bp->wol & MACB_WOL_ENABLED) + wol->wolopts |= WAKE_MAGIC; + } +} + +static int macb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +{ + struct macb *bp = netdev_priv(netdev); + + if (!(bp->wol & MACB_WOL_HAS_MAGIC_PACKET) || + (wol->wolopts & ~WAKE_MAGIC)) + return -EOPNOTSUPP; + + if (wol->wolopts & WAKE_MAGIC) + bp->wol |= MACB_WOL_ENABLED; + else + bp->wol &= ~MACB_WOL_ENABLED; + + device_set_wakeup_enable(&bp->pdev->dev, bp->wol & MACB_WOL_ENABLED); + + return 0; +} + static const struct ethtool_ops macb_ethtool_ops = { .get_settings = macb_get_settings, .set_settings = macb_set_settings, @@ -2137,6 +2167,8 @@ static const struct ethtool_ops macb_ethtool_ops = { .get_regs = macb_get_regs, .get_link = ethtool_op_get_link, .get_ts_info = ethtool_op_get_ts_info, + .get_wol = macb_get_wol, + .set_wol = macb_set_wol, }; static const struct ethtool_ops gem_ethtool_ops = { @@ -2403,19 +2435,21 @@ static int macb_init(struct platform_device *pdev) dev->hw_features &= ~NETIF_F_SG; dev->features = dev->hw_features; - val = 0; - if (bp->phy_interface == PHY_INTERFACE_MODE_RGMII) - val = GEM_BIT(RGMII); - else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII && - (bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII)) - val = MACB_BIT(RMII); - else 
if (!(bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII)) - val = MACB_BIT(MII); + if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) { + val = 0; + if (bp->phy_interface == PHY_INTERFACE_MODE_RGMII) + val = GEM_BIT(RGMII); + else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII && + (bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII)) + val = MACB_BIT(RMII); + else if (!(bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII)) + val = MACB_BIT(MII); - if (bp->caps & MACB_CAPS_USRIO_HAS_CLKEN) - val |= MACB_BIT(CLKEN); + if (bp->caps & MACB_CAPS_USRIO_HAS_CLKEN) + val |= MACB_BIT(CLKEN); - macb_or_gem_writel(bp, USRIO, val); + macb_or_gem_writel(bp, USRIO, val); + } /* Set MII management clock divider */ val = macb_mdc_clk_div(bp); @@ -2778,6 +2812,11 @@ static const struct macb_config emac_config = { .init = at91ether_init, }; +static const struct macb_config np4_config = { + .caps = MACB_CAPS_USRIO_DISABLED, + .clk_init = macb_clk_init, + .init = macb_init, +}; static const struct macb_config zynqmp_config = { .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_JUMBO, @@ -2798,6 +2837,7 @@ static const struct of_device_id macb_dt_ids[] = { { .compatible = "cdns,at32ap7000-macb" }, { .compatible = "cdns,at91sam9260-macb", .data = &at91sam9260_config }, { .compatible = "cdns,macb" }, + { .compatible = "cdns,np4-macb", .data = &np4_config }, { .compatible = "cdns,pc302-gem", .data = &pc302gem_config }, { .compatible = "cdns,gem", .data = &pc302gem_config }, { .compatible = "atmel,sama5d2-gem", .data = &sama5d2_config }, @@ -2821,7 +2861,7 @@ static int macb_probe(struct platform_device *pdev) struct device_node *np = pdev->dev.of_node; struct device_node *phy_node; const struct macb_config *macb_config = NULL; - struct clk *pclk, *hclk, *tx_clk; + struct clk *pclk, *hclk = NULL, *tx_clk = NULL; unsigned int queue_mask, num_queues; struct macb_platform_data *pdata; bool native_io; @@ -2888,6 +2928,11 @@ static int macb_probe(struct platform_device *pdev) if (macb_config) bp->jumbo_max_len = macb_config->jumbo_max_len; + bp->wol = 0; + if (of_get_property(np, "cdns,magic-packet", NULL)) + bp->wol |= MACB_WOL_HAS_MAGIC_PACKET; + device_init_wakeup(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET); + spin_lock_init(&bp->lock); /* setup capabilities */ @@ -2950,8 +2995,7 @@ static int macb_probe(struct platform_device *pdev) dev->base_addr, dev->irq, dev->dev_addr); phydev = bp->phy_dev; - netdev_info(dev, "attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n", - phydev->drv->name, dev_name(&phydev->dev), phydev->irq); + phy_attached_info(phydev); return 0; @@ -2981,7 +3025,6 @@ static int macb_remove(struct platform_device *pdev) if (bp->phy_dev) phy_disconnect(bp->phy_dev); mdiobus_unregister(bp->mii_bus); - kfree(bp->mii_bus->irq); mdiobus_free(bp->mii_bus); /* Shutdown the PHY if there is a GPIO reset */ @@ -3006,9 +3049,15 @@ static int __maybe_unused macb_suspend(struct device *dev) netif_carrier_off(netdev); netif_device_detach(netdev); - clk_disable_unprepare(bp->tx_clk); - clk_disable_unprepare(bp->hclk); - clk_disable_unprepare(bp->pclk); + if (bp->wol & MACB_WOL_ENABLED) { + macb_writel(bp, IER, MACB_BIT(WOL)); + macb_writel(bp, WOL, MACB_BIT(MAG)); + enable_irq_wake(bp->queues[0].irq); + } else { + clk_disable_unprepare(bp->tx_clk); + clk_disable_unprepare(bp->hclk); + clk_disable_unprepare(bp->pclk); + } return 0; } @@ -3019,9 +3068,15 @@ static int __maybe_unused macb_resume(struct device *dev) struct net_device *netdev = platform_get_drvdata(pdev); struct macb *bp = netdev_priv(netdev); - 
clk_prepare_enable(bp->pclk); - clk_prepare_enable(bp->hclk); - clk_prepare_enable(bp->tx_clk); + if (bp->wol & MACB_WOL_ENABLED) { + macb_writel(bp, IDR, MACB_BIT(WOL)); + macb_writel(bp, WOL, 0); + disable_irq_wake(bp->queues[0].irq); + } else { + clk_prepare_enable(bp->pclk); + clk_prepare_enable(bp->hclk); + clk_prepare_enable(bp->tx_clk); + } netif_device_attach(netdev); diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h index 5c03e811224d..9ba416d5afff 100644 --- a/drivers/net/ethernet/cadence/macb.h +++ b/drivers/net/ethernet/cadence/macb.h @@ -312,6 +312,8 @@ #define MACB_PFR_SIZE 1 #define MACB_PTZ_OFFSET 13 /* Enable pause time zero interrupt */ #define MACB_PTZ_SIZE 1 +#define MACB_WOL_OFFSET 14 /* Enable wake-on-lan interrupt */ +#define MACB_WOL_SIZE 1 /* Bitfields in MAN */ #define MACB_DATA_OFFSET 0 /* data */ @@ -400,6 +402,7 @@ #define MACB_CAPS_USRIO_HAS_CLKEN 0x00000002 #define MACB_CAPS_USRIO_DEFAULT_IS_MII 0x00000004 #define MACB_CAPS_NO_GIGABIT_HALF 0x00000008 +#define MACB_CAPS_USRIO_DISABLED 0x00000010 #define MACB_CAPS_FIFO_MODE 0x10000000 #define MACB_CAPS_GIGABIT_MODE_AVAILABLE 0x20000000 #define MACB_CAPS_SG_DISABLED 0x40000000 @@ -841,6 +844,8 @@ struct macb { unsigned int rx_frm_len_mask; unsigned int jumbo_max_len; + + u32 wol; }; static inline bool macb_is_gem(struct macb *bp) diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index b89504405b72..872765527081 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -1526,7 +1526,6 @@ static int liquidio_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts) { u64 ns; - u32 remainder; unsigned long flags; struct lio *lio = container_of(ptp, struct lio, ptp_info); struct octeon_device *oct = (struct octeon_device *)lio->oct_dev; @@ -1536,8 +1535,7 @@ static int liquidio_ptp_gettime(struct ptp_clock_info *ptp, ns += lio->ptp_adjust; spin_unlock_irqrestore(&lio->ptp_lock, flags); - ts->tv_sec = div_u64_rem(ns, 1000000000ULL, &remainder); - ts->tv_nsec = remainder; + *ts = ns_to_timespec64(ns); return 0; } diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h index 688828865c48..00cc9156abbb 100644 --- a/drivers/net/ethernet/cavium/thunder/nic.h +++ b/drivers/net/ethernet/cavium/thunder/nic.h @@ -248,10 +248,13 @@ struct nicvf_drv_stats { u64 rx_frames_jumbo; u64 rx_drops; + u64 rcv_buffer_alloc_failures; + /* Tx */ u64 tx_frames_ok; u64 tx_drops; u64 tx_tso; + u64 tx_timeout; u64 txq_stop; u64 txq_wake; }; @@ -306,6 +309,7 @@ struct nicvf { struct msix_entry msix_entries[NIC_VF_MSIX_VECTORS]; char irq_name[NIC_VF_MSIX_VECTORS][20]; bool irq_allocated[NIC_VF_MSIX_VECTORS]; + cpumask_var_t affinity_mask[NIC_VF_MSIX_VECTORS]; /* VF <-> PF mailbox communication */ bool pf_acked; diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c index a12b2e38cf61..d2d8ef270142 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c @@ -89,9 +89,11 @@ static const struct nicvf_stat nicvf_drv_stats[] = { NICVF_DRV_STAT(rx_frames_1518), NICVF_DRV_STAT(rx_frames_jumbo), NICVF_DRV_STAT(rx_drops), + NICVF_DRV_STAT(rcv_buffer_alloc_failures), NICVF_DRV_STAT(tx_frames_ok), NICVF_DRV_STAT(tx_tso), NICVF_DRV_STAT(tx_drops), + NICVF_DRV_STAT(tx_timeout), NICVF_DRV_STAT(txq_stop), 
NICVF_DRV_STAT(txq_wake), }; diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c index c24cb2a86a42..90ce93e380e1 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c @@ -828,7 +828,7 @@ static irqreturn_t nicvf_intr_handler(int irq, void *cq_irq) nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx); /* Schedule NAPI */ - napi_schedule(&cq_poll->napi); + napi_schedule_irqoff(&cq_poll->napi); /* Clear interrupt */ nicvf_clear_intr(nic, NICVF_INTR_CQ, qidx); @@ -899,6 +899,31 @@ static void nicvf_disable_msix(struct nicvf *nic) } } +static void nicvf_set_irq_affinity(struct nicvf *nic) +{ + int vec, cpu; + int irqnum; + + for (vec = 0; vec < nic->num_vec; vec++) { + if (!nic->irq_allocated[vec]) + continue; + + if (!zalloc_cpumask_var(&nic->affinity_mask[vec], GFP_KERNEL)) + return; + /* CQ interrupts */ + if (vec < NICVF_INTR_ID_SQ) + /* Leave CPU0 for RBDR and other interrupts */ + cpu = nicvf_netdev_qidx(nic, vec) + 1; + else + cpu = 0; + + cpumask_set_cpu(cpumask_local_spread(cpu, nic->node), + nic->affinity_mask[vec]); + irqnum = nic->msix_entries[vec].vector; + irq_set_affinity_hint(irqnum, nic->affinity_mask[vec]); + } +} + static int nicvf_register_interrupts(struct nicvf *nic) { int irq, ret = 0; @@ -944,8 +969,13 @@ static int nicvf_register_interrupts(struct nicvf *nic) ret = request_irq(nic->msix_entries[irq].vector, nicvf_qs_err_intr_handler, 0, nic->irq_name[irq], nic); - if (!ret) - nic->irq_allocated[irq] = true; + if (ret) + goto err; + + nic->irq_allocated[irq] = true; + + /* Set IRQ affinities */ + nicvf_set_irq_affinity(nic); err: if (ret) @@ -963,6 +993,9 @@ static void nicvf_unregister_interrupts(struct nicvf *nic) if (!nic->irq_allocated[irq]) continue; + irq_set_affinity_hint(nic->msix_entries[irq].vector, NULL); + free_cpumask_var(nic->affinity_mask[irq]); + if (irq < NICVF_INTR_ID_SQ) free_irq(nic->msix_entries[irq].vector, nic->napi[irq]); else @@ -1394,6 +1427,7 @@ static void nicvf_tx_timeout(struct net_device *dev) netdev_warn(dev, "%s: Transmit timed out, resetting\n", dev->name); + nic->drv_stats.tx_timeout++; schedule_work(&nic->reset_task); } diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c index d0d1b5490061..5adb208c1ad2 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c @@ -78,7 +78,7 @@ static void nicvf_free_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem) static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, gfp_t gfp, u32 buf_len, u64 **rbuf) { - int order = get_order(buf_len); + int order = (PAGE_SIZE <= 4096) ? 
PAGE_ALLOC_COSTLY_ORDER : 0; /* Check if request can be accommodated in the previously allocated page */ if (nic->rb_page) { @@ -96,8 +96,7 @@ static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, gfp_t gfp, nic->rb_page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN, order); if (!nic->rb_page) { - netdev_err(nic->netdev, - "Failed to allocate new rcv buffer\n"); + nic->drv_stats.rcv_buffer_alloc_failures++; return -ENOMEM; } nic->rb_page_offset = 0; diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c index 9df26c2263bc..f8abdffdd851 100644 --- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c +++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c @@ -886,7 +886,8 @@ static void bgx_get_qlm_mode(struct bgx *bgx) #ifdef CONFIG_ACPI -static int acpi_get_mac_address(struct acpi_device *adev, u8 *dst) +static int acpi_get_mac_address(struct device *dev, struct acpi_device *adev, + u8 *dst) { u8 mac[ETH_ALEN]; int ret; @@ -897,10 +898,13 @@ static int acpi_get_mac_address(struct acpi_device *adev, u8 *dst) goto out; if (!is_valid_ether_addr(mac)) { + dev_err(dev, "MAC address invalid: %pM\n", mac); ret = -EINVAL; goto out; } + dev_info(dev, "MAC address set to: %pM\n", mac); + memcpy(dst, mac, ETH_ALEN); out: return ret; @@ -911,14 +915,15 @@ static acpi_status bgx_acpi_register_phy(acpi_handle handle, u32 lvl, void *context, void **rv) { struct bgx *bgx = context; + struct device *dev = &bgx->pdev->dev; struct acpi_device *adev; if (acpi_bus_get_device(handle, &adev)) goto out; - acpi_get_mac_address(adev, bgx->lmac[bgx->lmac_count].mac); + acpi_get_mac_address(dev, adev, bgx->lmac[bgx->lmac_count].mac); - SET_NETDEV_DEV(&bgx->lmac[bgx->lmac_count].netdev, &bgx->pdev->dev); + SET_NETDEV_DEV(&bgx->lmac[bgx->lmac_count].netdev, dev); bgx->lmac[bgx->lmac_count].lmacid = bgx->lmac_count; out: @@ -968,26 +973,27 @@ static int bgx_init_acpi_phy(struct bgx *bgx) static int bgx_init_of_phy(struct bgx *bgx) { - struct device_node *np; - struct device_node *np_child; + struct fwnode_handle *fwn; u8 lmac = 0; - char bgx_sel[5]; const char *mac; - /* Get BGX node from DT */ - snprintf(bgx_sel, 5, "bgx%d", bgx->bgx_id); - np = of_find_node_by_name(NULL, bgx_sel); - if (!np) - return -ENODEV; + device_for_each_child_node(&bgx->pdev->dev, fwn) { + struct device_node *phy_np; + struct device_node *node = to_of_node(fwn); - for_each_child_of_node(np, np_child) { - struct device_node *phy_np = of_parse_phandle(np_child, - "phy-handle", 0); + /* If it is not an OF node, we cannot handle it yet, so + * exit the loop. 
+ */ + if (!node) + break; + + phy_np = of_parse_phandle(node, "phy-handle", 0); if (!phy_np) continue; + bgx->lmac[lmac].phydev = of_phy_find_device(phy_np); - mac = of_get_mac_address(np_child); + mac = of_get_mac_address(node); if (mac) ether_addr_copy(bgx->lmac[lmac].mac, mac); @@ -995,7 +1001,7 @@ static int bgx_init_of_phy(struct bgx *bgx) bgx->lmac[lmac].lmacid = lmac; lmac++; if (lmac == MAX_LMAC_PER_BGX) { - of_node_put(np_child); + of_node_put(node); break; } } diff --git a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c index d288dcf6062f..7ad43af6bde1 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c +++ b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c @@ -306,6 +306,10 @@ struct clip_tbl *t4_init_clip_tbl(unsigned int clipt_start, INIT_LIST_HEAD(&ctbl->hash_list[i]); cl_list = t4_alloc_mem(clipt_size*sizeof(struct clip_entry)); + if (!cl_list) { + t4_free_mem(ctbl); + return NULL; + } ctbl->cl_list = (void *)cl_list; for (i = 0; i < clipt_size; i++) { diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index ec6e849676c1..1dac6c6111bf 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -702,6 +702,11 @@ struct doorbell_stats { u32 db_full; }; +struct hash_mac_addr { + struct list_head list; + u8 addr[ETH_ALEN]; +}; + struct adapter { void __iomem *regs; void __iomem *bar2; @@ -740,6 +745,7 @@ struct adapter { void *uld_handle[CXGB4_ULD_MAX]; struct list_head list_node; struct list_head rcu_node; + struct list_head mac_hlist; /* list of MAC addresses in MPS Hash */ struct tid_info tids; void **tid_release_head; @@ -1207,6 +1213,24 @@ static inline int t4_wr_mbox_ns(struct adapter *adap, int mbox, const void *cmd, return t4_wr_mbox_meat(adap, mbox, cmd, size, rpl, false); } +/** + * hash_mac_addr - return the hash value of a MAC address + * @addr: the 48-bit Ethernet MAC address + * + * Hashes a MAC address according to the hash function used by HW inexact + * (hash) address matching. 
+ */ +static inline int hash_mac_addr(const u8 *addr) +{ + u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2]; + u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5]; + + a ^= b; + a ^= (a >> 12); + a ^= (a >> 6); + return a & 0x3f; +} + void t4_write_indirect(struct adapter *adap, unsigned int addr_reg, unsigned int data_reg, const u32 *vals, unsigned int nregs, unsigned int start_idx); @@ -1389,6 +1413,9 @@ int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid, int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox, unsigned int viid, bool free, unsigned int naddr, const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok); +int t4_free_mac_filt(struct adapter *adap, unsigned int mbox, + unsigned int viid, unsigned int naddr, + const u8 **addr, bool sleep_ok); int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid, int idx, const u8 *addr, bool persist, bool add_smt); int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid, diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c index 1bab34f923e7..e6a4072b494b 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c @@ -1726,13 +1726,13 @@ static int mps_tcam_show(struct seq_file *seq, void *v) seq_printf(seq, "%3u %02x:%02x:%02x:%02x:%02x:%02x " "%012llx %06x %06x - - %3c" - " %3c %4x " + " 'I' %4x " "%3c %#x%4u%4d", idx, addr[0], addr[1], addr[2], addr[3], addr[4], addr[5], (unsigned long long)mask, vniy, vnix, dip_hit ? 'Y' : 'N', - lookup_type ? 'I' : 'O', port_num, + port_num, (cls_lo & T6_SRAM_VLD_F) ? 'Y' : 'N', PORTMAP_G(cls_hi), T6_PF_G(cls_lo), diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index b8a5fb0c32d4..adad73f7c8cd 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -338,84 +338,108 @@ void t4_os_portmod_changed(const struct adapter *adap, int port_id) netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]); } +int dbfifo_int_thresh = 10; /* 10 == 640 entry threshold */ +module_param(dbfifo_int_thresh, int, 0644); +MODULE_PARM_DESC(dbfifo_int_thresh, "doorbell fifo interrupt threshold"); + /* - * Configure the exact and hash address filters to handle a port's multicast - * and secondary unicast MAC addresses. 
+ * usecs to sleep while draining the dbfifo
 */
-static int set_addr_filters(const struct net_device *dev, bool sleep)
+static int dbfifo_drain_delay = 1000;
+module_param(dbfifo_drain_delay, int, 0644);
+MODULE_PARM_DESC(dbfifo_drain_delay,
+		 "usecs to sleep while draining the dbfifo");
+
+static inline int cxgb4_set_addr_hash(struct port_info *pi)
 {
+	struct adapter *adap = pi->adapter;
+	u64 vec = 0;
+	bool ucast = false;
+	struct hash_mac_addr *entry;
+
+	/* Calculate the hash vector for the updated list and program it */
+	list_for_each_entry(entry, &adap->mac_hlist, list) {
+		ucast |= is_unicast_ether_addr(entry->addr);
+		vec |= (1ULL << hash_mac_addr(entry->addr));
+	}
+	return t4_set_addr_hash(adap, adap->mbox, pi->viid, ucast,
+				vec, false);
+}
+
+static int cxgb4_mac_sync(struct net_device *netdev, const u8 *mac_addr)
+{
+	struct port_info *pi = netdev_priv(netdev);
+	struct adapter *adap = pi->adapter;
+	int ret;
 	u64 mhash = 0;
 	u64 uhash = 0;
-	bool free = true;
-	u16 filt_idx[7];
-	const u8 *addr[7];
-	int ret, naddr = 0;
-	const struct netdev_hw_addr *ha;
-	int uc_cnt = netdev_uc_count(dev);
-	int mc_cnt = netdev_mc_count(dev);
-	const struct port_info *pi = netdev_priv(dev);
-	unsigned int mb = pi->adapter->pf;
+	bool free = false;
+	bool ucast = is_unicast_ether_addr(mac_addr);
+	const u8 *maclist[1] = {mac_addr};
+	struct hash_mac_addr *new_entry;
 
-	/* first do the secondary unicast addresses */
-	netdev_for_each_uc_addr(ha, dev) {
-		addr[naddr++] = ha->addr;
-		if (--uc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
-			ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
-					naddr, addr, filt_idx, &uhash, sleep);
-			if (ret < 0)
-				return ret;
-
-			free = false;
-			naddr = 0;
-		}
+	ret = t4_alloc_mac_filt(adap, adap->mbox, pi->viid, free, 1, maclist,
+				NULL, ucast ? &uhash : &mhash, false);
+	if (ret < 0)
+		goto out;
+	/* if hash != 0, then add the addr to hash addr list
+	 * so that at the end we can calculate the hash for the
+	 * list and program it
+	 */
+	if (uhash || mhash) {
+		new_entry = kzalloc(sizeof(*new_entry), GFP_ATOMIC);
+		if (!new_entry)
+			return -ENOMEM;
+		ether_addr_copy(new_entry->addr, mac_addr);
+		list_add_tail(&new_entry->list, &adap->mac_hlist);
+		ret = cxgb4_set_addr_hash(pi);
 	}
+out:
+	return ret < 0 ? ret : 0;
+}
 
-	/* next set up the multicast addresses */
-	netdev_for_each_mc_addr(ha, dev) {
-		addr[naddr++] = ha->addr;
-		if (--mc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
-			ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
-					naddr, addr, filt_idx, &mhash, sleep);
-			if (ret < 0)
-				return ret;
+static int cxgb4_mac_unsync(struct net_device *netdev, const u8 *mac_addr)
+{
+	struct port_info *pi = netdev_priv(netdev);
+	struct adapter *adap = pi->adapter;
+	int ret;
+	const u8 *maclist[1] = {mac_addr};
+	struct hash_mac_addr *entry, *tmp;
 
-			free = false;
-			naddr = 0;
+	/* If the MAC address to be removed is in the hash addr
+	 * list, delete it from the list and update hash vector
+	 */
+	list_for_each_entry_safe(entry, tmp, &adap->mac_hlist, list) {
+		if (ether_addr_equal(entry->addr, mac_addr)) {
+			list_del(&entry->list);
+			kfree(entry);
+			return cxgb4_set_addr_hash(pi);
 		}
 	}
 
-	return t4_set_addr_hash(pi->adapter, mb, pi->viid, uhash != 0,
-				uhash | mhash, sleep);
+	ret = t4_free_mac_filt(adap, adap->mbox, pi->viid, 1, maclist, false);
+	return ret < 0 ? -EINVAL : 0;
 }
 
-int dbfifo_int_thresh = 10; /* 10 == 640 entry threshold */
-module_param(dbfifo_int_thresh, int, 0644);
-MODULE_PARM_DESC(dbfifo_int_thresh, "doorbell fifo interrupt threshold");
-
-/*
- * usecs to sleep while draining the dbfifo
- */
-static int dbfifo_drain_delay = 1000;
-module_param(dbfifo_drain_delay, int, 0644);
-MODULE_PARM_DESC(dbfifo_drain_delay,
-		 "usecs to sleep while draining the dbfifo");
-
 /*
  * Set Rx properties of a port, such as promiscuity, address filters, and MTU.
  * If @mtu is -1 it is left unchanged.
  */
 static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
 {
-	int ret;
 	struct port_info *pi = netdev_priv(dev);
+	struct adapter *adapter = pi->adapter;
 
-	ret = set_addr_filters(dev, sleep_ok);
-	if (ret == 0)
-		ret = t4_set_rxmode(pi->adapter, pi->adapter->pf, pi->viid, mtu,
-				    (dev->flags & IFF_PROMISC) ? 1 : 0,
-				    (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
-				    sleep_ok);
-	return ret;
+	if (!(dev->flags & IFF_PROMISC)) {
+		__dev_uc_sync(dev, cxgb4_mac_sync, cxgb4_mac_unsync);
+		if (!(dev->flags & IFF_ALLMULTI))
+			__dev_mc_sync(dev, cxgb4_mac_sync, cxgb4_mac_unsync);
+	}
+
+	return t4_set_rxmode(adapter, adapter->mbox, pi->viid, mtu,
+			     (dev->flags & IFF_PROMISC) ? 1 : 0,
+			     (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
+			     sleep_ok);
 }
 
 /**
@@ -2677,6 +2701,8 @@ static int cxgb_up(struct adapter *adap)
 #if IS_ENABLED(CONFIG_IPV6)
 	update_clip(adap);
 #endif
+	/* Initialize hash mac addr list */
+	INIT_LIST_HEAD(&adap->mac_hlist);
 out:
 	return err;
 irq_err:
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 636b4691f252..cc1736bece0f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -4433,23 +4433,6 @@ void t4_intr_disable(struct adapter *adapter)
 }
 
 /**
- * hash_mac_addr - return the hash value of a MAC address
- * @addr: the 48-bit Ethernet MAC address
- *
- * Hashes a MAC address according to the hash function used by HW inexact
- * (hash) address matching.
- */
-static int hash_mac_addr(const u8 *addr)
-{
-	u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
-	u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];
-	a ^= b;
-	a ^= (a >> 12);
-	a ^= (a >> 6);
-	return a & 0x3f;
-}
-
-/**
 * t4_config_rss_range - configure a portion of the RSS mapping table
 * @adapter: the adapter
 * @mbox: mbox to use for the FW command
@@ -6738,6 +6721,81 @@ int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
 }
 
 /**
+ * t4_free_mac_filt - frees exact-match filters of given MAC addresses
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @viid: the VI id
+ * @naddr: the number of MAC addresses to free filters for (up to 7)
+ * @addr: the MAC address(es)
+ * @sleep_ok: call is allowed to sleep
+ *
+ * Frees the exact-match filter for each of the supplied addresses.
+ *
+ * Returns a negative error number or the number of filters freed.
+ */
+int t4_free_mac_filt(struct adapter *adap, unsigned int mbox,
+		     unsigned int viid, unsigned int naddr,
+		     const u8 **addr, bool sleep_ok)
+{
+	int offset, ret = 0;
+	struct fw_vi_mac_cmd c;
+	unsigned int nfilters = 0;
+	unsigned int max_naddr = is_t4(adap->params.chip) ?
+				       NUM_MPS_CLS_SRAM_L_INSTANCES :
+				       NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
+	unsigned int rem = naddr;
+
+	if (naddr > max_naddr)
+		return -EINVAL;
+
+	for (offset = 0; offset < (int)naddr ; /**/) {
+		unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact)
+					 ? 
rem + : ARRAY_SIZE(c.u.exact)); + size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd, + u.exact[fw_naddr]), 16); + struct fw_vi_mac_exact *p; + int i; + + memset(&c, 0, sizeof(c)); + c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_WRITE_F | + FW_CMD_EXEC_V(0) | + FW_VI_MAC_CMD_VIID_V(viid)); + c.freemacs_to_len16 = + cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(0) | + FW_CMD_LEN16_V(len16)); + + for (i = 0, p = c.u.exact; i < (int)fw_naddr; i++, p++) { + p->valid_to_idx = cpu_to_be16( + FW_VI_MAC_CMD_VALID_F | + FW_VI_MAC_CMD_IDX_V(FW_VI_MAC_MAC_BASED_FREE)); + memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr)); + } + + ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok); + if (ret) + break; + + for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) { + u16 index = FW_VI_MAC_CMD_IDX_G( + be16_to_cpu(p->valid_to_idx)); + + if (index < max_naddr) + nfilters++; + } + + offset += fw_naddr; + rem -= fw_naddr; + } + + if (ret == 0) + ret = nfilters; + return ret; +} + +/** * t4_change_mac - modifies the exact-match filter for a MAC address * @adap: the adapter * @mbox: mailbox to use for the FW command diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h index a072d341e205..1d2d1da40c80 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h @@ -1021,6 +1021,8 @@ struct cpl_l2t_write_req { #define L2T_W_NOREPLY_V(x) ((x) << L2T_W_NOREPLY_S) #define L2T_W_NOREPLY_F L2T_W_NOREPLY_V(1U) +#define CPL_L2T_VLAN_NONE 0xfff + struct cpl_l2t_write_rpl { union opcode_tid ot; u8 status; diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h index a32de30ea663..c8661c77b4e3 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h @@ -561,6 +561,7 @@ enum fw_flowc_mnem { FW_FLOWC_MNEM_SNDBUF, FW_FLOWC_MNEM_MSS, FW_FLOWC_MNEM_TXDATAPLEN_MAX, + FW_FLOWC_MNEM_SCHEDCLASS = 11, }; struct fw_flowc_mnemval { diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h index 6049f70e110c..4a707c32d76f 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h +++ b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h @@ -348,6 +348,11 @@ struct sge { #define for_each_ethrxq(sge, iter) \ for (iter = 0; iter < (sge)->ethqsets; iter++) +struct hash_mac_addr { + struct list_head list; + u8 addr[ETH_ALEN]; +}; + /* * Per-"adapter" (Virtual Function) information. */ @@ -381,6 +386,9 @@ struct adapter { /* various locks */ spinlock_t stats_lock; + + /* list of MAC addresses in MPS Hash */ + struct list_head mac_hlist; }; enum { /* adapter flags */ diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c index 0cfa5d72cafd..8337514ababb 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c @@ -741,6 +741,9 @@ static int adapter_up(struct adapter *adapter) */ enable_rx(adapter); t4vf_sge_start(adapter); + + /* Initialize hash mac addr list*/ + INIT_LIST_HEAD(&adapter->mac_hlist); return 0; } @@ -905,51 +908,74 @@ static inline unsigned int collect_netdev_mc_list_addrs(const struct net_device return naddr; } -/* - * Configure the exact and hash address filters to handle a port's multicast - * and secondary unicast MAC addresses. 
- */
-static int set_addr_filters(const struct net_device *dev, bool sleep)
+static inline int cxgb4vf_set_addr_hash(struct port_info *pi)
 {
-	u64 mhash = 0;
-	u64 uhash = 0;
-	bool free = true;
-	unsigned int offset, naddr;
-	const u8 *addr[7];
-	int ret;
-	const struct port_info *pi = netdev_priv(dev);
+	struct adapter *adapter = pi->adapter;
+	u64 vec = 0;
+	bool ucast = false;
+	struct hash_mac_addr *entry;
 
-	/* first do the secondary unicast addresses */
-	for (offset = 0; ; offset += naddr) {
-		naddr = collect_netdev_uc_list_addrs(dev, addr, offset,
-						     ARRAY_SIZE(addr));
-		if (naddr == 0)
-			break;
+	/* Calculate the hash vector for the updated list and program it */
+	list_for_each_entry(entry, &adapter->mac_hlist, list) {
+		ucast |= is_unicast_ether_addr(entry->addr);
+		vec |= (1ULL << hash_mac_addr(entry->addr));
+	}
+	return t4vf_set_addr_hash(adapter, pi->viid, ucast, vec, false);
+}
 
-		ret = t4vf_alloc_mac_filt(pi->adapter, pi->viid, free,
-					  naddr, addr, NULL, &uhash, sleep);
-		if (ret < 0)
-			return ret;
+static int cxgb4vf_mac_sync(struct net_device *netdev, const u8 *mac_addr)
+{
+	struct port_info *pi = netdev_priv(netdev);
+	struct adapter *adapter = pi->adapter;
+	int ret;
+	u64 mhash = 0;
+	u64 uhash = 0;
+	bool free = false;
+	bool ucast = is_unicast_ether_addr(mac_addr);
+	const u8 *maclist[1] = {mac_addr};
+	struct hash_mac_addr *new_entry;
 
-		free = false;
+	ret = t4vf_alloc_mac_filt(adapter, pi->viid, free, 1, maclist,
+				  NULL, ucast ? &uhash : &mhash, false);
+	if (ret < 0)
+		goto out;
+	/* if hash != 0, then add the addr to hash addr list
+	 * so that at the end we can calculate the hash for the
+	 * list and program it
+	 */
+	if (uhash || mhash) {
+		new_entry = kzalloc(sizeof(*new_entry), GFP_ATOMIC);
+		if (!new_entry)
+			return -ENOMEM;
+		ether_addr_copy(new_entry->addr, mac_addr);
+		list_add_tail(&new_entry->list, &adapter->mac_hlist);
+		ret = cxgb4vf_set_addr_hash(pi);
 	}
+out:
+	return ret < 0 ? ret : 0;
+}
 
-	/* next set up the multicast addresses */
-	for (offset = 0; ; offset += naddr) {
-		naddr = collect_netdev_mc_list_addrs(dev, addr, offset,
-						     ARRAY_SIZE(addr));
-		if (naddr == 0)
-			break;
+static int cxgb4vf_mac_unsync(struct net_device *netdev, const u8 *mac_addr)
+{
+	struct port_info *pi = netdev_priv(netdev);
+	struct adapter *adapter = pi->adapter;
+	int ret;
+	const u8 *maclist[1] = {mac_addr};
+	struct hash_mac_addr *entry, *tmp;
 
-		ret = t4vf_alloc_mac_filt(pi->adapter, pi->viid, free,
-					  naddr, addr, NULL, &mhash, sleep);
-		if (ret < 0)
-			return ret;
-		free = false;
+	/* If the MAC address to be removed is in the hash addr
+	 * list, delete it from the list and update hash vector
+	 */
+	list_for_each_entry_safe(entry, tmp, &adapter->mac_hlist, list) {
+		if (ether_addr_equal(entry->addr, mac_addr)) {
+			list_del(&entry->list);
+			kfree(entry);
+			return cxgb4vf_set_addr_hash(pi);
+		}
 	}
 
-	return t4vf_set_addr_hash(pi->adapter, pi->viid, uhash != 0,
-				  uhash | mhash, sleep);
+	ret = t4vf_free_mac_filt(adapter, pi->viid, 1, maclist, false);
+	return ret < 0 ? 
-EINVAL : 0; } /* @@ -958,16 +984,18 @@ static int set_addr_filters(const struct net_device *dev, bool sleep) */ static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok) { - int ret; struct port_info *pi = netdev_priv(dev); - ret = set_addr_filters(dev, sleep_ok); - if (ret == 0) - ret = t4vf_set_rxmode(pi->adapter, pi->viid, -1, - (dev->flags & IFF_PROMISC) != 0, - (dev->flags & IFF_ALLMULTI) != 0, - 1, -1, sleep_ok); - return ret; + if (!(dev->flags & IFF_PROMISC)) { + __dev_uc_sync(dev, cxgb4vf_mac_sync, cxgb4vf_mac_unsync); + if (!(dev->flags & IFF_ALLMULTI)) + __dev_mc_sync(dev, cxgb4vf_mac_sync, + cxgb4vf_mac_unsync); + } + return t4vf_set_rxmode(pi->adapter, pi->viid, -1, + (dev->flags & IFF_PROMISC) != 0, + (dev->flags & IFF_ALLMULTI) != 0, + 1, -1, sleep_ok); } /* diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h index 88b8981b4751..6ce302fe1a61 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h +++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h @@ -285,6 +285,24 @@ static inline int is_t4(enum chip_type chip) return CHELSIO_CHIP_VERSION(chip) == CHELSIO_T4; } +/** + * hash_mac_addr - return the hash value of a MAC address + * @addr: the 48-bit Ethernet MAC address + * + * Hashes a MAC address according to the hash function used by hardware + * inexact (hash) address matching. + */ +static inline int hash_mac_addr(const u8 *addr) +{ + u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2]; + u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5]; + + a ^= b; + a ^= (a >> 12); + a ^= (a >> 6); + return a & 0x3f; +} + int t4vf_wait_dev_ready(struct adapter *); int t4vf_port_init(struct adapter *, int); @@ -320,6 +338,8 @@ int t4vf_set_rxmode(struct adapter *, unsigned int, int, int, int, int, int, bool); int t4vf_alloc_mac_filt(struct adapter *, unsigned int, bool, unsigned int, const u8 **, u16 *, u64 *, bool); +int t4vf_free_mac_filt(struct adapter *, unsigned int, unsigned int naddr, + const u8 **, bool); int t4vf_change_mac(struct adapter *, unsigned int, int, const u8 *, bool); int t4vf_set_addr_hash(struct adapter *, unsigned int, bool, u64, bool); int t4vf_get_port_stats(struct adapter *, int, struct t4vf_port_stats *); diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c index b6fa74aafe47..54220117dcba 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c @@ -236,23 +236,6 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size, return -ETIMEDOUT; } -/** - * hash_mac_addr - return the hash value of a MAC address - * @addr: the 48-bit Ethernet MAC address - * - * Hashes a MAC address according to the hash function used by hardware - * inexact (hash) address matching. 
- */
-static int hash_mac_addr(const u8 *addr)
-{
-	u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
-	u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];
-	a ^= b;
-	a ^= (a >> 12);
-	a ^= (a >> 6);
-	return a & 0x3f;
-}
-
 #define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
 		     FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G | \
 		     FW_PORT_CAP_SPEED_100G | FW_PORT_CAP_ANEG)
@@ -1266,6 +1249,77 @@ int t4vf_alloc_mac_filt(struct adapter *adapter, unsigned int viid, bool free,
 }
 
 /**
+ * t4vf_free_mac_filt - frees exact-match filters of given MAC addresses
+ * @adapter: the adapter
+ * @viid: the VI id
+ * @naddr: the number of MAC addresses to free filters for (up to 7)
+ * @addr: the MAC address(es)
+ * @sleep_ok: call is allowed to sleep
+ *
+ * Frees the exact-match filter for each of the supplied addresses.
+ *
+ * Returns a negative error number or the number of filters freed.
+ */
+int t4vf_free_mac_filt(struct adapter *adapter, unsigned int viid,
+		       unsigned int naddr, const u8 **addr, bool sleep_ok)
+{
+	int offset, ret = 0;
+	struct fw_vi_mac_cmd cmd;
+	unsigned int nfilters = 0;
+	unsigned int max_naddr = adapter->params.arch.mps_tcam_size;
+	unsigned int rem = naddr;
+
+	if (naddr > max_naddr)
+		return -EINVAL;
+
+	for (offset = 0; offset < (int)naddr ; /**/) {
+		unsigned int fw_naddr = (rem < ARRAY_SIZE(cmd.u.exact) ?
+					 rem : ARRAY_SIZE(cmd.u.exact));
+		size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
+						     u.exact[fw_naddr]), 16);
+		struct fw_vi_mac_exact *p;
+		int i;
+
+		memset(&cmd, 0, sizeof(cmd));
+		cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
+				     FW_CMD_REQUEST_F |
+				     FW_CMD_WRITE_F |
+				     FW_CMD_EXEC_V(0) |
+				     FW_VI_MAC_CMD_VIID_V(viid));
+		cmd.freemacs_to_len16 =
+				cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(0) |
+					    FW_CMD_LEN16_V(len16));
+
+		for (i = 0, p = cmd.u.exact; i < (int)fw_naddr; i++, p++) {
+			p->valid_to_idx = cpu_to_be16(
+				FW_VI_MAC_CMD_VALID_F |
+				FW_VI_MAC_CMD_IDX_V(FW_VI_MAC_MAC_BASED_FREE));
+			memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
+		}
+
+		ret = t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), &cmd,
+					sleep_ok);
+		if (ret)
+			break;
+
+		for (i = 0, p = cmd.u.exact; i < fw_naddr; i++, p++) {
+			u16 index = FW_VI_MAC_CMD_IDX_G(
+						be16_to_cpu(p->valid_to_idx));
+
+			if (index < max_naddr)
+				nfilters++;
+		}
+
+		offset += fw_naddr;
+		rem -= fw_naddr;
+	}
+
+	if (ret == 0)
+		ret = nfilters;
+	return ret;
+}
+
+/**
 * t4vf_change_mac - modifies the exact-match filter for a MAC address
 * @adapter: the adapter
 * @viid: the Virtual Interface ID
diff --git a/drivers/net/ethernet/dnet.c b/drivers/net/ethernet/dnet.c
index 13d00a38a5bd..b69a9eacc531 100644
--- a/drivers/net/ethernet/dnet.c
+++ b/drivers/net/ethernet/dnet.c
@@ -255,15 +255,9 @@ static int dnet_mii_probe(struct net_device *dev)
 {
 	struct dnet *bp = netdev_priv(dev);
 	struct phy_device *phydev = NULL;
-	int phy_addr;
 
 	/* find the first phy */
-	for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) {
-		if (bp->mii_bus->phy_map[phy_addr]) {
-			phydev = bp->mii_bus->phy_map[phy_addr];
-			break;
-		}
-	}
+	phydev = phy_find_first(bp->mii_bus);
 
 	if (!phydev) {
 		printk(KERN_ERR "%s: no PHY found\n", dev->name);
@@ -274,11 +268,11 @@ static int dnet_mii_probe(struct net_device *dev)
 
 	/* attach the mac to the phy */
 	if (bp->capabilities & DNET_HAS_RMII) {
-		phydev = phy_connect(dev, dev_name(&phydev->dev),
+		phydev = phy_connect(dev, phydev_name(phydev),
				     &dnet_handle_link_change,
				     PHY_INTERFACE_MODE_RMII);
 	} else {
-		phydev = phy_connect(dev, dev_name(&phydev->dev),
+		phydev = phy_connect(dev, phydev_name(phydev),
				     &dnet_handle_link_change,
				     PHY_INTERFACE_MODE_MII);
 	}
@@ -308,7 +302,7 @@ static int dnet_mii_probe(struct net_device *dev)
 
 static int dnet_mii_init(struct dnet *bp)
 {
-	int err, i;
+	int err;
 
 	bp->mii_bus = mdiobus_alloc();
 	if (bp->mii_bus == NULL)
@@ -323,16 +317,6 @@ static int dnet_mii_init(struct dnet *bp)
 
 	bp->mii_bus->priv = bp;
 
-	bp->mii_bus->irq = devm_kmalloc(&bp->pdev->dev,
-					sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
-	if (!bp->mii_bus->irq) {
-		err = -ENOMEM;
-		goto err_out;
-	}
-
-	for (i = 0; i < PHY_MAX_ADDR; i++)
-		bp->mii_bus->irq[i] = PHY_POLL;
-
 	if (mdiobus_register(bp->mii_bus)) {
 		err = -ENXIO;
 		goto err_out;
@@ -892,9 +876,7 @@ static int dnet_probe(struct platform_device *pdev)
 		 (bp->capabilities & DNET_HAS_GIGABIT) ? "" : "no ",
 		 (bp->capabilities & DNET_HAS_DMA) ? "" : "no ");
 	phydev = bp->phy_dev;
-	dev_info(&pdev->dev, "attached PHY driver [%s] "
-		 "(mii_bus:phy_addr=%s, irq=%d)\n",
-		 phydev->drv->name, dev_name(&phydev->dev), phydev->irq);
+	phy_attached_info(phydev);
 
 	return 0;
 
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index cf837831304b..ab24f84060c6 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -89,6 +89,10 @@
 #define BE3_MAX_TX_QS		16
 #define BE3_MAX_EVT_QS		16
 #define BE3_SRIOV_MAX_EVT_QS	8
+#define SH_VF_MAX_NIC_EQS	3	/* Skyhawk VFs can have a max of 4 EQs
+					 * and at least 1 is granted to either
+					 * SURF or DPDK
+					 */
 
 #define MAX_RSS_IFACES		15
 #define MAX_RX_QS		32
@@ -111,6 +115,8 @@
 #define RSS_INDIR_TABLE_LEN	128
 #define RSS_HASH_KEY_LEN	40
 
+#define BE_UNKNOWN_PHY_STATE	0xFF
+
 struct be_dma_mem {
 	void *va;
 	dma_addr_t dma;
@@ -386,13 +392,17 @@ enum vf_state {
 #define BE_FLAGS_QNQ_ASYNC_EVT_RCVD		BIT(7)
 #define BE_FLAGS_VXLAN_OFFLOADS			BIT(8)
 #define BE_FLAGS_SETUP_DONE			BIT(9)
-#define BE_FLAGS_EVT_INCOMPATIBLE_SFP		BIT(10)
+#define BE_FLAGS_PHY_MISCONFIGURED		BIT(10)
 #define BE_FLAGS_ERR_DETECTION_SCHEDULED	BIT(11)
 #define BE_FLAGS_OS2BMC				BIT(12)
 
 #define BE_UC_PMAC_COUNT			30
 #define BE_VF_UC_PMAC_COUNT			2
 
+#define MAX_ERR_RECOVERY_RETRY_COUNT		3
+#define ERR_DETECTION_DELAY			1000
+#define ERR_RECOVERY_RETRY_DELAY		30000
+
 /* Ethtool set_dump flags */
 #define LANCER_INITIATE_FW_DUMP			0x1
 #define LANCER_DELETE_FW_DUMP			0x2
@@ -530,6 +540,7 @@ struct be_adapter {
 	u16 work_counter;
 
 	struct delayed_work be_err_detection_work;
+	u8 recovery_retries;
 	u8 err_flags;
 	u32 flags;
 	u32 cmd_privileges;
@@ -594,6 +605,7 @@ struct be_adapter {
 	u32 bmc_filt_mask;
 	u32 fat_dump_len;
 	u16 serial_num[CNTL_SERIAL_NUM_WORDS];
+	u8 phy_state; /* state of sfp optics (functional, faulted, etc.) */
 };
 
 #define be_physfn(adapter)	(!adapter->virtfn)
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index b63d8ad2e115..66fa21426fe2 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -19,19 +19,25 @@
 #include "be.h"
 #include "be_cmds.h"
 
-static char *be_port_misconfig_evt_desc[] = {
-	"A valid SFP module detected",
-	"Optics faulted/ incorrectly installed/ not installed.",
-	"Optics of two types installed.",
-	"Incompatible optics.",
-	"Unknown port SFP status"
+char *be_misconfig_evt_port_state[] = {
+	"Physical Link is functional",
+	"Optics faulted/incorrectly installed/not installed - Reseat optics. 
If issue not resolved, replace.", + "Optics of two types installed – Remove one optic or install matching pair of optics.", + "Incompatible optics – Replace with compatible optics for card to function.", + "Unqualified optics – Replace with Avago optics for Warranty and Technical Support.", + "Uncertified optics – Replace with Avago-certified optics to enable link operation." }; -static char *be_port_misconfig_remedy_desc[] = { - "", - "Reseat optics. If issue not resolved, replace", - "Remove one optic or install matching pair of optics", - "Replace with compatible optics for card to function", +static char *be_port_misconfig_evt_severity[] = { + "KERN_WARN", + "KERN_INFO", + "KERN_ERR", + "KERN_WARN" +}; + +static char *phy_state_oper_desc[] = { + "Link is non-operational", + "Link is operational", "" }; @@ -65,7 +71,22 @@ static struct be_cmd_priv_map cmd_priv_map[] = { CMD_SUBSYSTEM_COMMON, BE_PRIV_LNKMGMT | BE_PRIV_VHADM | BE_PRIV_DEVCFG | BE_PRIV_DEVSEC - } + }, + { + OPCODE_LOWLEVEL_HOST_DDR_DMA, + CMD_SUBSYSTEM_LOWLEVEL, + BE_PRIV_DEVCFG | BE_PRIV_DEVSEC + }, + { + OPCODE_LOWLEVEL_LOOPBACK_TEST, + CMD_SUBSYSTEM_LOWLEVEL, + BE_PRIV_DEVCFG | BE_PRIV_DEVSEC + }, + { + OPCODE_LOWLEVEL_SET_LOOPBACK_MODE, + CMD_SUBSYSTEM_LOWLEVEL, + BE_PRIV_DEVCFG | BE_PRIV_DEVSEC + }, }; static bool be_cmd_allowed(struct be_adapter *adapter, u8 opcode, u8 subsystem) @@ -236,7 +257,8 @@ static int be_mcc_compl_process(struct be_adapter *adapter, if (base_status != MCC_STATUS_SUCCESS && !be_skip_err_log(opcode, base_status, addl_status)) { - if (base_status == MCC_STATUS_UNAUTHORIZED_REQUEST) { + if (base_status == MCC_STATUS_UNAUTHORIZED_REQUEST || + addl_status == MCC_ADDL_STATUS_INSUFFICIENT_PRIVILEGES) { dev_warn(&adapter->pdev->dev, "VF is not privileged to issue opcode %d-%d\n", opcode, subsystem); @@ -281,22 +303,56 @@ static void be_async_port_misconfig_event_process(struct be_adapter *adapter, { struct be_async_event_misconfig_port *evt = (struct be_async_event_misconfig_port *)compl; - u32 sfp_mismatch_evt = le32_to_cpu(evt->event_data_word1); + u32 sfp_misconfig_evt_word1 = le32_to_cpu(evt->event_data_word1); + u32 sfp_misconfig_evt_word2 = le32_to_cpu(evt->event_data_word2); + u8 phy_oper_state = PHY_STATE_OPER_MSG_NONE; struct device *dev = &adapter->pdev->dev; - u8 port_misconfig_evt; + u8 msg_severity = DEFAULT_MSG_SEVERITY; + u8 phy_state_info; + u8 new_phy_state; + + new_phy_state = + (sfp_misconfig_evt_word1 >> (adapter->hba_port_num * 8)) & 0xff; + + if (new_phy_state == adapter->phy_state) + return; + + adapter->phy_state = new_phy_state; + + /* for older fw that doesn't populate link effect data */ + if (!sfp_misconfig_evt_word2) + goto log_message; - port_misconfig_evt = - ((sfp_mismatch_evt >> (adapter->hba_port_num * 8)) & 0xff); + phy_state_info = + (sfp_misconfig_evt_word2 >> (adapter->hba_port_num * 8)) & 0xff; + if (phy_state_info & PHY_STATE_INFO_VALID) { + msg_severity = (phy_state_info & PHY_STATE_MSG_SEVERITY) >> 1; + + if (be_phy_unqualified(new_phy_state)) + phy_oper_state = (phy_state_info & PHY_STATE_OPER); + } + +log_message: /* Log an error message that would allow a user to determine * whether the SFPs have an issue */ - dev_info(dev, "Port %c: %s %s", adapter->port_name, - be_port_misconfig_evt_desc[port_misconfig_evt], - be_port_misconfig_remedy_desc[port_misconfig_evt]); + if (be_phy_state_unknown(new_phy_state)) + dev_printk(be_port_misconfig_evt_severity[msg_severity], dev, + "Port %c: Unrecognized Optics state: 0x%x. 
%s", + adapter->port_name, + new_phy_state, + phy_state_oper_desc[phy_oper_state]); + else + dev_printk(be_port_misconfig_evt_severity[msg_severity], dev, + "Port %c: %s %s", + adapter->port_name, + be_misconfig_evt_port_state[new_phy_state], + phy_state_oper_desc[phy_oper_state]); - if (port_misconfig_evt == INCOMPATIBLE_SFP) - adapter->flags |= BE_FLAGS_EVT_INCOMPATIBLE_SFP; + /* Log Vendor name and part no. if a misconfigured SFP is detected */ + if (be_phy_misconfigured(new_phy_state)) + adapter->flags |= BE_FLAGS_PHY_MISCONFIGURED; } /* Grp5 CoS Priority evt */ @@ -1497,34 +1553,25 @@ int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags, return status; } -/* Uses MCCQ */ +/* Uses MCCQ if available else MBOX */ int be_cmd_if_destroy(struct be_adapter *adapter, int interface_id, u32 domain) { - struct be_mcc_wrb *wrb; + struct be_mcc_wrb wrb = {0}; struct be_cmd_req_if_destroy *req; int status; if (interface_id == -1) return 0; - spin_lock_bh(&adapter->mcc_lock); - - wrb = wrb_from_mccq(adapter); - if (!wrb) { - status = -EBUSY; - goto err; - } - req = embedded_payload(wrb); + req = embedded_payload(&wrb); be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, OPCODE_COMMON_NTWK_INTERFACE_DESTROY, - sizeof(*req), wrb, NULL); + sizeof(*req), &wrb, NULL); req->hdr.domain = domain; req->interface_id = cpu_to_le32(interface_id); - status = be_mcc_notify_wait(adapter); -err: - spin_unlock_bh(&adapter->mcc_lock); + status = be_cmd_notify_wait(adapter, &wrb); return status; } @@ -3168,6 +3215,10 @@ int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num, struct be_cmd_req_set_lmode *req; int status; + if (!be_cmd_allowed(adapter, OPCODE_LOWLEVEL_SET_LOOPBACK_MODE, + CMD_SUBSYSTEM_LOWLEVEL)) + return -EPERM; + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); @@ -3213,6 +3264,10 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num, struct be_cmd_resp_loopback_test *resp; int status; + if (!be_cmd_allowed(adapter, OPCODE_LOWLEVEL_LOOPBACK_TEST, + CMD_SUBSYSTEM_LOWLEVEL)) + return -EPERM; + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); @@ -3259,6 +3314,10 @@ int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern, int status; int i, j = 0; + if (!be_cmd_allowed(adapter, OPCODE_LOWLEVEL_HOST_DDR_DMA, + CMD_SUBSYSTEM_LOWLEVEL)) + return -EPERM; + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h index 241819b36ca7..0b9f741f31af 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.h +++ b/drivers/net/ethernet/emulex/benet/be_cmds.h @@ -68,7 +68,8 @@ enum mcc_addl_status { MCC_ADDL_STATUS_TOO_MANY_INTERFACES = 0x4a, MCC_ADDL_STATUS_INSUFFICIENT_VLANS = 0xab, MCC_ADDL_STATUS_INVALID_SIGNATURE = 0x56, - MCC_ADDL_STATUS_MISSING_SIGNATURE = 0x57 + MCC_ADDL_STATUS_MISSING_SIGNATURE = 0x57, + MCC_ADDL_STATUS_INSUFFICIENT_PRIVILEGES = 0x60 }; #define CQE_BASE_STATUS_MASK 0xFFFF @@ -175,10 +176,53 @@ struct be_async_event_qnq { u32 flags; } __packed; -#define INCOMPATIBLE_SFP 0x3 +enum { + BE_PHY_FUNCTIONAL = 0, + BE_PHY_NOT_PRESENT = 1, + BE_PHY_DIFF_MEDIA = 2, + BE_PHY_INCOMPATIBLE = 3, + BE_PHY_UNQUALIFIED = 4, + BE_PHY_UNCERTIFIED = 5 +}; + +#define PHY_STATE_MSG_SEVERITY 0x6 +#define PHY_STATE_OPER 0x1 +#define PHY_STATE_INFO_VALID 0x80 +#define PHY_STATE_OPER_MSG_NONE 0x2 +#define DEFAULT_MSG_SEVERITY 0x1 + +#define be_phy_state_unknown(phy_state) (phy_state > BE_PHY_UNCERTIFIED) +#define 
be_phy_unqualified(phy_state) \ + (phy_state == BE_PHY_UNQUALIFIED || \ + phy_state == BE_PHY_UNCERTIFIED) +#define be_phy_misconfigured(phy_state) \ + (phy_state == BE_PHY_INCOMPATIBLE || \ + phy_state == BE_PHY_UNQUALIFIED || \ + phy_state == BE_PHY_UNCERTIFIED) + +extern char *be_misconfig_evt_port_state[]; + /* async event indicating misconfigured port */ struct be_async_event_misconfig_port { + /* DATA_WORD1: + * phy state of port 0: bits 7 - 0 + * phy state of port 1: bits 15 - 8 + * phy state of port 2: bits 23 - 16 + * phy state of port 3: bits 31 - 24 + */ u32 event_data_word1; + /* DATA_WORD2: + * phy state info of port 0: bits 7 - 0 + * phy state info of port 1: bits 15 - 8 + * phy state info of port 2: bits 23 - 16 + * phy state info of port 3: bits 31 - 24 + * + * PHY STATE INFO: + * Link operability :bit 0 + * Message severity :bit 2 - 1 + * Rsvd :bits 6 - 3 + * phy state info valid :bit 7 + */ u32 event_data_word2; u32 rsvd0; u32 flags; diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c index a19ac441336f..2ff691636dac 100644 --- a/drivers/net/ethernet/emulex/benet/be_ethtool.c +++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c @@ -720,29 +720,32 @@ static int be_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state) { struct be_adapter *adapter = netdev_priv(netdev); + int status = 0; switch (state) { case ETHTOOL_ID_ACTIVE: - be_cmd_get_beacon_state(adapter, adapter->hba_port_num, - &adapter->beacon_state); - return 1; /* cycle on/off once per second */ + status = be_cmd_get_beacon_state(adapter, adapter->hba_port_num, + &adapter->beacon_state); + if (status) + return be_cmd_status(status); + return 1; /* cycle on/off once per second */ case ETHTOOL_ID_ON: - be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0, - BEACON_STATE_ENABLED); + status = be_cmd_set_beacon_state(adapter, adapter->hba_port_num, + 0, 0, BEACON_STATE_ENABLED); break; case ETHTOOL_ID_OFF: - be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0, - BEACON_STATE_DISABLED); + status = be_cmd_set_beacon_state(adapter, adapter->hba_port_num, + 0, 0, BEACON_STATE_DISABLED); break; case ETHTOOL_ID_INACTIVE: - be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0, - adapter->beacon_state); + status = be_cmd_set_beacon_state(adapter, adapter->hba_port_num, + 0, 0, adapter->beacon_state); } - return 0; + return be_cmd_status(status); } static int be_set_dump(struct net_device *netdev, struct ethtool_dump *dump) diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index f99de3657ce3..88f427cb76c3 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -1463,6 +1463,9 @@ static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid) if (lancer_chip(adapter) && vid == 0) return 0; + if (!test_bit(vid, adapter->vids)) + return 0; + clear_bit(vid, adapter->vids); adapter->vlans_added--; @@ -1914,8 +1917,7 @@ static u32 be_get_eq_delay_mult_enc(struct be_eq_obj *eqo) if (!aic->enable) return 0; - if (time_before_eq(now, aic->jiffies) || - jiffies_to_msecs(now - aic->jiffies) < 1) + if (jiffies_to_msecs(now - aic->jiffies) < 1) eqd = aic->prev_eqd; else eqd = be_get_new_eqd(eqo); @@ -3363,6 +3365,7 @@ done: static void be_rx_qs_destroy(struct be_adapter *adapter) { + struct rss_info *rss = &adapter->rss_info; struct be_queue_info *q; struct be_rx_obj *rxo; int i; @@ -3389,6 +3392,12 @@ static void 
be_rx_qs_destroy(struct be_adapter *adapter)
 	}
 	be_queue_free(adapter, q);
 }
+
+	if (rss->rss_flags) {
+		rss->rss_flags = RSS_ENABLE_NONE;
+		be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
+				  128, rss->rss_hkey);
+	}
 }
 
 static void be_disable_if_filters(struct be_adapter *adapter)
@@ -3509,20 +3518,21 @@ static int be_rx_qs_create(struct be_adapter *adapter)
 		if (!BEx_chip(adapter))
 			rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
 				RSS_ENABLE_UDP_IPV6;
+
+		netdev_rss_key_fill(rss_key, RSS_HASH_KEY_LEN);
+		rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
+				       RSS_INDIR_TABLE_LEN, rss_key);
+		if (rc) {
+			rss->rss_flags = RSS_ENABLE_NONE;
+			return rc;
+		}
+
+		memcpy(rss->rss_hkey, rss_key, RSS_HASH_KEY_LEN);
 	} else {
 		/* Disable RSS, if only default RX Q is created */
 		rss->rss_flags = RSS_ENABLE_NONE;
 	}
 
-	netdev_rss_key_fill(rss_key, RSS_HASH_KEY_LEN);
-	rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
-			       RSS_INDIR_TABLE_LEN, rss_key);
-	if (rc) {
-		rss->rss_flags = RSS_ENABLE_NONE;
-		return rc;
-	}
-
-	memcpy(rss->rss_hkey, rss_key, RSS_HASH_KEY_LEN);
-
 	/* Post 1 less than RXQ-len to avoid head being equal to tail,
 	 * which is a queue empty condition
@@ -3789,18 +3799,15 @@ static u16 be_calculate_vf_qs(struct be_adapter *adapter, u16 num_vfs)
 	struct be_resources res = adapter->pool_res;
 	u16 num_vf_qs = 1;
 
-	/* Distribute the queue resources equally among the PF and it's VFs
+	/* Distribute the queue resources among the PF and its VFs
 	 * Do not distribute queue resources in multi-channel configuration.
 	 */
 	if (num_vfs && !be_is_mc(adapter)) {
-		/* If number of VFs requested is 8 less than max supported,
-		 * assign 8 queue pairs to the PF and divide the remaining
-		 * resources evenly among the VFs
-		 */
-		if (num_vfs < (be_max_vfs(adapter) - 8))
-			num_vf_qs = (res.max_rss_qs - 8) / num_vfs;
-		else
-			num_vf_qs = res.max_rss_qs / num_vfs;
+		/* Divide the qpairs evenly among the VFs and the PF, capped
+		 * at VF-EQ-count. Any remainder qpairs belong to the PF.
+		 */
+		num_vf_qs = min(SH_VF_MAX_NIC_EQS,
+				res.max_rss_qs / (num_vfs + 1));
 
 		/* Skyhawk-R chip supports only MAX_RSS_IFACES RSS capable
 		 * interfaces per port. Provide RSS on VFs, only if number
@@ -4082,6 +4089,7 @@ static void be_setup_init(struct be_adapter *adapter)
 	adapter->if_handle = -1;
 	adapter->be3_native = false;
 	adapter->if_flags = 0;
+	adapter->phy_state = BE_UNKNOWN_PHY_STATE;
 	if (be_physfn(adapter))
 		adapter->cmd_privileges = MAX_PRIVILEGES;
 	else
@@ -4265,10 +4273,10 @@ static void be_schedule_worker(struct be_adapter *adapter)
 	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
 }
 
-static void be_schedule_err_detection(struct be_adapter *adapter)
+static void be_schedule_err_detection(struct be_adapter *adapter, u32 delay)
 {
 	schedule_delayed_work(&adapter->be_err_detection_work,
-			      msecs_to_jiffies(1000));
+			      msecs_to_jiffies(delay));
 	adapter->flags |= BE_FLAGS_ERR_DETECTION_SCHEDULED;
 }
 
@@ -4307,6 +4315,23 @@ err:
 	return status;
 }
 
+static int be_if_create(struct be_adapter *adapter)
+{
+	u32 en_flags = BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS;
+	u32 cap_flags = be_if_cap_flags(adapter);
+	int status;
+
+	if (adapter->cfg_num_qs == 1)
+		cap_flags &= ~(BE_IF_FLAGS_DEFQ_RSS | BE_IF_FLAGS_RSS);
+
+	en_flags &= cap_flags;
+	/* will enable all the needed filter flags in be_open() */
+	status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
+				  &adapter->if_handle, 0);
+
+	return status;
+}
+
 int be_update_queues(struct be_adapter *adapter)
 {
 	struct net_device *netdev = adapter->netdev;
@@ -4324,6 +4349,9 @@ int be_update_queues(struct be_adapter *adapter)
 		be_msix_disable(adapter);
 
 	be_clear_queues(adapter);
+	status = be_cmd_if_destroy(adapter, adapter->if_handle, 0);
+	if (status)
+		return status;
 
 	if (!msix_enabled(adapter)) {
 		status = be_msix_enable(adapter);
@@ -4331,6 +4359,10 @@ int be_update_queues(struct be_adapter *adapter)
 			return status;
 	}
 
+	status = be_if_create(adapter);
+	if (status)
+		return status;
+
 	status = be_setup_queues(adapter);
 	if (status)
 		return status;
@@ -4395,7 +4427,6 @@ static int be_func_init(struct be_adapter *adapter)
 static int be_setup(struct be_adapter *adapter)
 {
 	struct device *dev = &adapter->pdev->dev;
-	u32 en_flags;
 	int status;
 
 	status = be_func_init(adapter);
@@ -4428,10 +4459,7 @@ static int be_setup(struct be_adapter *adapter)
 		goto err;
 
 	/* will enable all the needed filter flags in be_open() */
-	en_flags = BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS;
-	en_flags = en_flags & be_if_cap_flags(adapter);
-	status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
-				  &adapter->if_handle, 0);
+	status = be_if_create(adapter);
 	if (status)
 		goto err;
 
@@ -4589,6 +4617,9 @@ static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
 
 	/* BE and Lancer chips support VEB mode only */
 	if (BEx_chip(adapter) || lancer_chip(adapter)) {
+		/* VEB is disabled in non-SR-IOV profiles on BE3/Lancer */
+		if (!pci_sriov_get_totalvfs(adapter->pdev))
+			return 0;
 		hsw_mode = PORT_FWD_TYPE_VEB;
 	} else {
 		status = be_cmd_get_hsw_config(adapter, NULL, 0,
@@ -4804,7 +4835,7 @@ static void be_netdev_init(struct net_device *netdev)
 	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
 		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
 		NETIF_F_HW_VLAN_CTAG_TX;
-	if (be_multi_rxq(adapter))
+	if ((be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS))
 		netdev->hw_features |= NETIF_F_RXHASH;
 
 	netdev->features |= netdev->hw_features |
@@ -4859,21 +4890,27 @@ static int be_resume(struct be_adapter *adapter)
 
 static int be_err_recover(struct be_adapter *adapter)
 {
-	struct device *dev = &adapter->pdev->dev;
 	int status;
 
+	/* Error recovery is supported only on Lancer as of now */
+	if (!lancer_chip(adapter))
+		return -EIO;
+
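+	/* Recovery is not retried here: on failure the caller,
+	 * be_err_detection_task(), reschedules it - every second on VFs
+	 * and with a 30 second back-off on PFs.
+	 */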
+	/* Wait for adapter to reach quiescent state before
+	 * destroying queues
+	 */
+	status = be_fw_wait_ready(adapter);
+	if (status)
+		goto err;
+
+	be_cleanup(adapter);
+
 	status = be_resume(adapter);
 	if (status)
 		goto err;
 
-	dev_info(dev, "Adapter recovery successful\n");
 	return 0;
 err:
-	if (be_physfn(adapter))
-		dev_err(dev, "Adapter recovery failed\n");
-	else
-		dev_err(dev, "Re-trying adapter recovery\n");
-
 	return status;
 }
 
@@ -4882,21 +4919,43 @@ static void be_err_detection_task(struct work_struct *work)
 	struct be_adapter *adapter =
 				container_of(work, struct be_adapter,
 					     be_err_detection_work.work);
-	int status = 0;
+	struct device *dev = &adapter->pdev->dev;
+	int recovery_status;
+	int delay = ERR_DETECTION_DELAY;
 
 	be_detect_error(adapter);
 
-	if (be_check_error(adapter, BE_ERROR_HW)) {
-		be_cleanup(adapter);
-
-		/* As of now error recovery support is in Lancer only */
-		if (lancer_chip(adapter))
-			status = be_err_recover(adapter);
+	if (be_check_error(adapter, BE_ERROR_HW))
+		recovery_status = be_err_recover(adapter);
+	else
+		goto reschedule_task;
+
+	if (!recovery_status) {
+		adapter->recovery_retries = 0;
+		dev_info(dev, "Adapter recovery successful\n");
+		goto reschedule_task;
+	} else if (be_virtfn(adapter)) {
+		/* For VFs, check every second whether the PF has
+		 * allocated resources.
+		 */
+		dev_err(dev, "Re-trying adapter recovery\n");
+		goto reschedule_task;
+	} else if (adapter->recovery_retries++ <
+		   MAX_ERR_RECOVERY_RETRY_COUNT) {
+		/* In case of another error during recovery, it takes 30 sec
+		 * for adapter to come out of error. Retry error recovery after
+		 * this time interval.
+		 */
+		dev_err(&adapter->pdev->dev, "Re-trying adapter recovery\n");
+		delay = ERR_RECOVERY_RETRY_DELAY;
+		goto reschedule_task;
+	} else {
+		dev_err(dev, "Adapter recovery failed\n");
 	}
 
-	/* Always attempt recovery on VFs */
-	if (!status || be_virtfn(adapter))
-		be_schedule_err_detection(adapter);
+	return;
 
+reschedule_task:
+	be_schedule_err_detection(adapter, delay);
 }
 
 static void be_log_sfp_info(struct be_adapter *adapter)
@@ -4906,11 +4965,13 @@ static void be_log_sfp_info(struct be_adapter *adapter)
 	status = be_cmd_query_sfp_info(adapter);
 	if (!status) {
 		dev_err(&adapter->pdev->dev,
-			"Unqualified SFP+ detected on %c from %s part no: %s",
-			adapter->port_name, adapter->phy.vendor_name,
+			"Port %c: %s Vendor: %s part no: %s",
+			adapter->port_name,
+			be_misconfig_evt_port_state[adapter->phy_state],
+			adapter->phy.vendor_name,
 			adapter->phy.vendor_pn);
 	}
-	adapter->flags &= ~BE_FLAGS_EVT_INCOMPATIBLE_SFP;
+	adapter->flags &= ~BE_FLAGS_PHY_MISCONFIGURED;
}
 
 static void be_worker(struct work_struct *work)
@@ -4954,7 +5015,7 @@ static void be_worker(struct work_struct *work)
 	if (!skyhawk_chip(adapter))
 		be_eqd_update(adapter, false);
 
-	if (adapter->flags & BE_FLAGS_EVT_INCOMPATIBLE_SFP)
+	if (adapter->flags & BE_FLAGS_PHY_MISCONFIGURED)
 		be_log_sfp_info(adapter);
 
 reschedule:
@@ -5292,7 +5353,7 @@ static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
 
 	be_roce_dev_add(adapter);
 
-	be_schedule_err_detection(adapter);
+	be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);
 
 	/* On Die temperature not supported for VF. 
*/ if (be_physfn(adapter) && IS_ENABLED(CONFIG_BE2NET_HWMON)) { @@ -5359,7 +5420,7 @@ static int be_pci_resume(struct pci_dev *pdev) if (status) return status; - be_schedule_err_detection(adapter); + be_schedule_err_detection(adapter, ERR_DETECTION_DELAY); if (adapter->wol_en) be_setup_wol(adapter, false); @@ -5459,7 +5520,7 @@ static void be_eeh_resume(struct pci_dev *pdev) if (status) goto err; - be_schedule_err_detection(adapter); + be_schedule_err_detection(adapter, ERR_DETECTION_DELAY); return; err: dev_err(&adapter->pdev->dev, "EEH resume failed\n"); diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c index ff665493ca97..62fa136554ac 100644 --- a/drivers/net/ethernet/ethoc.c +++ b/drivers/net/ethernet/ethoc.c @@ -678,7 +678,7 @@ static int ethoc_mdio_probe(struct net_device *dev) int err; if (priv->phy_id != -1) - phy = priv->mdio->phy_map[priv->phy_id]; + phy = mdiobus_get_phy(priv->mdio, priv->phy_id); else phy = phy_find_first(priv->mdio); @@ -766,7 +766,7 @@ static int ethoc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) if (mdio->phy_id >= PHY_MAX_ADDR) return -ERANGE; - phy = priv->mdio->phy_map[mdio->phy_id]; + phy = mdiobus_get_phy(priv->mdio, mdio->phy_id); if (!phy) return -ENODEV; } else { @@ -1015,7 +1015,6 @@ static int ethoc_probe(struct platform_device *pdev) struct resource *mmio = NULL; struct resource *mem = NULL; struct ethoc *priv = NULL; - unsigned int phy; int num_bd; int ret = 0; bool random_mac = false; @@ -1206,19 +1205,10 @@ static int ethoc_probe(struct platform_device *pdev) priv->mdio->write = ethoc_mdio_write; priv->mdio->priv = priv; - priv->mdio->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); - if (!priv->mdio->irq) { - ret = -ENOMEM; - goto free_mdio; - } - - for (phy = 0; phy < PHY_MAX_ADDR; phy++) - priv->mdio->irq[phy] = PHY_POLL; - ret = mdiobus_register(priv->mdio); if (ret) { dev_err(&netdev->dev, "failed to register MDIO bus\n"); - goto free_mdio; + goto free; } ret = ethoc_mdio_probe(netdev); @@ -1250,8 +1240,6 @@ error2: netif_napi_del(&priv->napi); error: mdiobus_unregister(priv->mdio); -free_mdio: - kfree(priv->mdio->irq); mdiobus_free(priv->mdio); free: if (priv->clk) diff --git a/drivers/net/ethernet/ezchip/Kconfig b/drivers/net/ethernet/ezchip/Kconfig index 48ecbc8aaaea..b423ad380b6a 100644 --- a/drivers/net/ethernet/ezchip/Kconfig +++ b/drivers/net/ethernet/ezchip/Kconfig @@ -18,6 +18,7 @@ if NET_VENDOR_EZCHIP config EZCHIP_NPS_MANAGEMENT_ENET tristate "EZchip NPS management enet support" depends on OF_IRQ && OF_NET + depends on HAS_IOMEM ---help--- Simple LAN device for debug or management purposes. Device supports interrupts for RX and TX(completion). 
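The mdiobus cleanups in the dnet and ethoc hunks above (and the ftgmac100 hunk that follows) all rely on the MDIO core now allocating the per-address interrupt table itself and defaulting every entry to PHY_POLL. Under that assumption, a driver's bus setup reduces to roughly the sketch below; the foo_* names are placeholders for illustration, not identifiers from these drivers:

	static int foo_mdio_init(struct foo_priv *priv, struct device *dev)
	{
		int err;

		priv->mii_bus = mdiobus_alloc();
		if (!priv->mii_bus)
			return -ENOMEM;

		priv->mii_bus->name = "foo-mii";
		snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-mii",
			 dev_name(dev));
		priv->mii_bus->read = foo_mdio_read;	/* driver accessors */
		priv->mii_bus->write = foo_mdio_write;
		priv->mii_bus->priv = priv;
		/* no mii_bus->irq allocation or PHY_POLL seeding needed */

		err = mdiobus_register(priv->mii_bus);
		if (err)
			mdiobus_free(priv->mii_bus);
		return err;
	}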
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c index 6d0c5d5eea6d..84384e1585a5 100644 --- a/drivers/net/ethernet/faraday/ftgmac100.c +++ b/drivers/net/ethernet/faraday/ftgmac100.c @@ -71,7 +71,6 @@ struct ftgmac100 { struct napi_struct napi; struct mii_bus *mii_bus; - int phy_irq[PHY_MAX_ADDR]; struct phy_device *phydev; int old_speed; }; @@ -835,26 +834,15 @@ static void ftgmac100_adjust_link(struct net_device *netdev) static int ftgmac100_mii_probe(struct ftgmac100 *priv) { struct net_device *netdev = priv->netdev; - struct phy_device *phydev = NULL; - int i; - - /* search for connect PHY device */ - for (i = 0; i < PHY_MAX_ADDR; i++) { - struct phy_device *tmp = priv->mii_bus->phy_map[i]; - - if (tmp) { - phydev = tmp; - break; - } - } + struct phy_device *phydev; - /* now we are supposed to have a proper phydev, to attach to... */ + phydev = phy_find_first(priv->mii_bus); if (!phydev) { netdev_info(netdev, "%s: no PHY found\n", netdev->name); return -ENODEV; } - phydev = phy_connect(netdev, dev_name(&phydev->dev), + phydev = phy_connect(netdev, phydev_name(phydev), &ftgmac100_adjust_link, PHY_INTERFACE_MODE_GMII); if (IS_ERR(phydev)) { @@ -1188,7 +1176,6 @@ static int ftgmac100_probe(struct platform_device *pdev) struct net_device *netdev; struct ftgmac100 *priv; int err; - int i; if (!pdev) return -ENODEV; @@ -1257,10 +1244,6 @@ static int ftgmac100_probe(struct platform_device *pdev) priv->mii_bus->priv = netdev; priv->mii_bus->read = ftgmac100_mdiobus_read; priv->mii_bus->write = ftgmac100_mdiobus_write; - priv->mii_bus->irq = priv->phy_irq; - - for (i = 0; i < PHY_MAX_ADDR; i++) - priv->mii_bus->irq[i] = PHY_POLL; err = mdiobus_register(priv->mii_bus); if (err) { diff --git a/drivers/net/ethernet/freescale/Makefile b/drivers/net/ethernet/freescale/Makefile index 4097c58d17a7..cbe21dc7e37e 100644 --- a/drivers/net/ethernet/freescale/Makefile +++ b/drivers/net/ethernet/freescale/Makefile @@ -4,6 +4,9 @@ obj-$(CONFIG_FEC) += fec.o fec-objs :=fec_main.o fec_ptp.o +CFLAGS_fec_main.o := -D__CHECK_ENDIAN__ +CFLAGS_fec_ptp.o := -D__CHECK_ENDIAN__ + obj-$(CONFIG_FEC_MPC52xx) += fec_mpc52xx.o ifeq ($(CONFIG_FEC_MPC52xx_MDIO),y) obj-$(CONFIG_FEC_MPC52xx) += fec_mpc52xx_phy.o diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h index 99d33e2d35e6..195122e11f10 100644 --- a/drivers/net/ethernet/freescale/fec.h +++ b/drivers/net/ethernet/freescale/fec.h @@ -19,8 +19,7 @@ #include <linux/timecounter.h> #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ - defined(CONFIG_M520x) || defined(CONFIG_M532x) || \ - defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28) + defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) /* * Just figures, Motorola would have to change the offsets for * registers in the same peripheral device on different models @@ -65,6 +64,7 @@ #define FEC_R_FIFO_RSEM 0x194 /* Receive FIFO section empty threshold */ #define FEC_R_FIFO_RAEM 0x198 /* Receive FIFO almost empty threshold */ #define FEC_R_FIFO_RAFL 0x19c /* Receive FIFO almost full threshold */ +#define FEC_FTRL 0x1b0 /* Frame truncation receive length*/ #define FEC_RACC 0x1c4 /* Receive Accelerator function */ #define FEC_RCMR_1 0x1c8 /* Receive classification match ring 1 */ #define FEC_RCMR_2 0x1cc /* Receive classification match ring 2 */ @@ -190,28 +190,45 @@ /* * Define the buffer descriptor structure. 
+ * + * Evidently, ARM SoCs have the FEC block generated in a + * little endian mode so adjust endianness accordingly. */ -#if defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28) +#if defined(CONFIG_ARM) +#define fec32_to_cpu le32_to_cpu +#define fec16_to_cpu le16_to_cpu +#define cpu_to_fec32 cpu_to_le32 +#define cpu_to_fec16 cpu_to_le16 +#define __fec32 __le32 +#define __fec16 __le16 + struct bufdesc { - unsigned short cbd_datlen; /* Data length */ - unsigned short cbd_sc; /* Control and status info */ - unsigned long cbd_bufaddr; /* Buffer address */ + __fec16 cbd_datlen; /* Data length */ + __fec16 cbd_sc; /* Control and status info */ + __fec32 cbd_bufaddr; /* Buffer address */ }; #else +#define fec32_to_cpu be32_to_cpu +#define fec16_to_cpu be16_to_cpu +#define cpu_to_fec32 cpu_to_be32 +#define cpu_to_fec16 cpu_to_be16 +#define __fec32 __be32 +#define __fec16 __be16 + struct bufdesc { - unsigned short cbd_sc; /* Control and status info */ - unsigned short cbd_datlen; /* Data length */ - unsigned long cbd_bufaddr; /* Buffer address */ + __fec16 cbd_sc; /* Control and status info */ + __fec16 cbd_datlen; /* Data length */ + __fec32 cbd_bufaddr; /* Buffer address */ }; #endif struct bufdesc_ex { struct bufdesc desc; - unsigned long cbd_esc; - unsigned long cbd_prot; - unsigned long cbd_bdu; - unsigned long ts; - unsigned short res0[4]; + __fec32 cbd_esc; + __fec32 cbd_prot; + __fec32 cbd_bdu; + __fec32 ts; + __fec16 res0[4]; }; /* @@ -293,12 +310,6 @@ struct bufdesc_ex { #define FEC_R_BUFF_SIZE(X) (((X) == 1) ? FEC_R_BUFF_SIZE_1 : \ (((X) == 2) ? \ FEC_R_BUFF_SIZE_2 : FEC_R_BUFF_SIZE_0)) -#define FEC_R_DES_ACTIVE(X) (((X) == 1) ? FEC_R_DES_ACTIVE_1 : \ - (((X) == 2) ? \ - FEC_R_DES_ACTIVE_2 : FEC_R_DES_ACTIVE_0)) -#define FEC_X_DES_ACTIVE(X) (((X) == 1) ? FEC_X_DES_ACTIVE_1 : \ - (((X) == 2) ? \ - FEC_X_DES_ACTIVE_2 : FEC_X_DES_ACTIVE_0)) #define FEC_DMA_CFG(X) (((X) == 2) ? FEC_DMA_CFG_2 : FEC_DMA_CFG_1) @@ -364,6 +375,7 @@ struct bufdesc_ex { #define FEC_ENET_TS_TIMER ((uint)0x00008000) #define FEC_DEFAULT_IMASK (FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII | FEC_ENET_TS_TIMER) +#define FEC_NAPI_IMASK (FEC_ENET_MII | FEC_ENET_TS_TIMER) #define FEC_RX_DISABLED_IMASK (FEC_DEFAULT_IMASK & (~FEC_ENET_RXF)) /* ENET interrupt coalescing macro define */ @@ -431,33 +443,35 @@ struct bufdesc_ex { /* Controller supports RACC register */ #define FEC_QUIRK_HAS_RACC (1 << 12) +struct bufdesc_prop { + int qid; + /* Address of Rx and Tx buffers */ + struct bufdesc *base; + struct bufdesc *last; + struct bufdesc *cur; + void __iomem *reg_desc_active; + dma_addr_t dma; + unsigned short ring_size; + unsigned char dsize; + unsigned char dsize_log2; +}; + struct fec_enet_priv_tx_q { - int index; + struct bufdesc_prop bd; unsigned char *tx_bounce[TX_RING_SIZE]; struct sk_buff *tx_skbuff[TX_RING_SIZE]; - dma_addr_t bd_dma; - struct bufdesc *tx_bd_base; - uint tx_ring_size; - unsigned short tx_stop_threshold; unsigned short tx_wake_threshold; - struct bufdesc *cur_tx; struct bufdesc *dirty_tx; char *tso_hdrs; dma_addr_t tso_hdrs_dma; }; struct fec_enet_priv_rx_q { - int index; + struct bufdesc_prop bd; struct sk_buff *rx_skbuff[RX_RING_SIZE]; - - dma_addr_t bd_dma; - struct bufdesc *rx_bd_base; - uint rx_ring_size; - - struct bufdesc *cur_rx; }; /* The FEC buffer descriptors track the ring buffers. 
The rx_bd_base and @@ -497,8 +511,6 @@ struct fec_enet_private { unsigned long work_ts; unsigned long work_mdio; - unsigned short bufdesc_size; - struct platform_device *pdev; int dev_id; diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index d2328fc5da57..bad0ba29a94a 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -48,6 +48,7 @@ #include <linux/irq.h> #include <linux/clk.h> #include <linux/platform_device.h> +#include <linux/mdio.h> #include <linux/phy.h> #include <linux/fec.h> #include <linux/of.h> @@ -216,86 +217,38 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address"); #define IS_TSO_HEADER(txq, addr) \ ((addr >= txq->tso_hdrs_dma) && \ - (addr < txq->tso_hdrs_dma + txq->tx_ring_size * TSO_HEADER_SIZE)) + (addr < txq->tso_hdrs_dma + txq->bd.ring_size * TSO_HEADER_SIZE)) static int mii_cnt; -static inline -struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp, - struct fec_enet_private *fep, - int queue_id) -{ - struct bufdesc *new_bd = bdp + 1; - struct bufdesc_ex *ex_new_bd = (struct bufdesc_ex *)bdp + 1; - struct fec_enet_priv_tx_q *txq = fep->tx_queue[queue_id]; - struct fec_enet_priv_rx_q *rxq = fep->rx_queue[queue_id]; - struct bufdesc_ex *ex_base; - struct bufdesc *base; - int ring_size; - - if (bdp >= txq->tx_bd_base) { - base = txq->tx_bd_base; - ring_size = txq->tx_ring_size; - ex_base = (struct bufdesc_ex *)txq->tx_bd_base; - } else { - base = rxq->rx_bd_base; - ring_size = rxq->rx_ring_size; - ex_base = (struct bufdesc_ex *)rxq->rx_bd_base; - } - - if (fep->bufdesc_ex) - return (struct bufdesc *)((ex_new_bd >= (ex_base + ring_size)) ? - ex_base : ex_new_bd); - else - return (new_bd >= (base + ring_size)) ? - base : new_bd; -} - -static inline -struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp, - struct fec_enet_private *fep, - int queue_id) -{ - struct bufdesc *new_bd = bdp - 1; - struct bufdesc_ex *ex_new_bd = (struct bufdesc_ex *)bdp - 1; - struct fec_enet_priv_tx_q *txq = fep->tx_queue[queue_id]; - struct fec_enet_priv_rx_q *rxq = fep->rx_queue[queue_id]; - struct bufdesc_ex *ex_base; - struct bufdesc *base; - int ring_size; - - if (bdp >= txq->tx_bd_base) { - base = txq->tx_bd_base; - ring_size = txq->tx_ring_size; - ex_base = (struct bufdesc_ex *)txq->tx_bd_base; - } else { - base = rxq->rx_bd_base; - ring_size = rxq->rx_ring_size; - ex_base = (struct bufdesc_ex *)rxq->rx_bd_base; - } +static struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp, + struct bufdesc_prop *bd) +{ + return (bdp >= bd->last) ? bd->base + : (struct bufdesc *)(((unsigned)bdp) + bd->dsize); +} - if (fep->bufdesc_ex) - return (struct bufdesc *)((ex_new_bd < ex_base) ? - (ex_new_bd + ring_size) : ex_new_bd); - else - return (new_bd < base) ? (new_bd + ring_size) : new_bd; +static struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp, + struct bufdesc_prop *bd) +{ + return (bdp <= bd->base) ? 
bd->last + : (struct bufdesc *)(((unsigned)bdp) - bd->dsize); } -static int fec_enet_get_bd_index(struct bufdesc *base, struct bufdesc *bdp, - struct fec_enet_private *fep) +static int fec_enet_get_bd_index(struct bufdesc *bdp, + struct bufdesc_prop *bd) { - return ((const char *)bdp - (const char *)base) / fep->bufdesc_size; + return ((const char *)bdp - (const char *)bd->base) >> bd->dsize_log2; } -static int fec_enet_get_free_txdesc_num(struct fec_enet_private *fep, - struct fec_enet_priv_tx_q *txq) +static int fec_enet_get_free_txdesc_num(struct fec_enet_priv_tx_q *txq) { int entries; - entries = ((const char *)txq->dirty_tx - - (const char *)txq->cur_tx) / fep->bufdesc_size - 1; + entries = (((const char *)txq->dirty_tx - + (const char *)txq->bd.cur) >> txq->bd.dsize_log2) - 1; - return entries > 0 ? entries : entries + txq->tx_ring_size; + return entries >= 0 ? entries : entries + txq->bd.ring_size; } static void swap_buffer(void *bufaddr, int len) @@ -328,18 +281,20 @@ static void fec_dump(struct net_device *ndev) pr_info("Nr SC addr len SKB\n"); txq = fep->tx_queue[0]; - bdp = txq->tx_bd_base; + bdp = txq->bd.base; do { - pr_info("%3u %c%c 0x%04x 0x%08lx %4u %p\n", + pr_info("%3u %c%c 0x%04x 0x%08x %4u %p\n", index, - bdp == txq->cur_tx ? 'S' : ' ', + bdp == txq->bd.cur ? 'S' : ' ', bdp == txq->dirty_tx ? 'H' : ' ', - bdp->cbd_sc, bdp->cbd_bufaddr, bdp->cbd_datlen, + fec16_to_cpu(bdp->cbd_sc), + fec32_to_cpu(bdp->cbd_bufaddr), + fec16_to_cpu(bdp->cbd_datlen), txq->tx_skbuff[index]); - bdp = fec_enet_get_nextdesc(bdp, fep, 0); + bdp = fec_enet_get_nextdesc(bdp, &txq->bd); index++; - } while (bdp != txq->tx_bd_base); + } while (bdp != txq->bd.base); } static inline bool is_ipv4_pkt(struct sk_buff *skb) @@ -370,10 +325,9 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq, struct net_device *ndev) { struct fec_enet_private *fep = netdev_priv(ndev); - struct bufdesc *bdp = txq->cur_tx; + struct bufdesc *bdp = txq->bd.cur; struct bufdesc_ex *ebdp; int nr_frags = skb_shinfo(skb)->nr_frags; - unsigned short queue = skb_get_queue_mapping(skb); int frag, frag_len; unsigned short status; unsigned int estatus = 0; @@ -385,10 +339,10 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq, for (frag = 0; frag < nr_frags; frag++) { this_frag = &skb_shinfo(skb)->frags[frag]; - bdp = fec_enet_get_nextdesc(bdp, fep, queue); + bdp = fec_enet_get_nextdesc(bdp, &txq->bd); ebdp = (struct bufdesc_ex *)bdp; - status = bdp->cbd_sc; + status = fec16_to_cpu(bdp->cbd_sc); status &= ~BD_ENET_TX_STATS; status |= (BD_ENET_TX_TC | BD_ENET_TX_READY); frag_len = skb_shinfo(skb)->frags[frag].size; @@ -406,16 +360,16 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq, if (fep->bufdesc_ex) { if (fep->quirks & FEC_QUIRK_HAS_AVB) - estatus |= FEC_TX_BD_FTYPE(queue); + estatus |= FEC_TX_BD_FTYPE(txq->bd.qid); if (skb->ip_summed == CHECKSUM_PARTIAL) estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS; ebdp->cbd_bdu = 0; - ebdp->cbd_esc = estatus; + ebdp->cbd_esc = cpu_to_fec32(estatus); } bufaddr = page_address(this_frag->page.p) + this_frag->page_offset; - index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep); + index = fec_enet_get_bd_index(bdp, &txq->bd); if (((unsigned long) bufaddr) & fep->tx_align || fep->quirks & FEC_QUIRK_SWAP_FRAME) { memcpy(txq->tx_bounce[index], bufaddr, frag_len); @@ -428,24 +382,27 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq, addr = dma_map_single(&fep->pdev->dev, bufaddr, frag_len, DMA_TO_DEVICE); if (dma_mapping_error(&fep->pdev->dev, addr)) { 
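+			/* skb is freed by the caller,
+			 * fec_enet_txq_submit_skb(), which also unmaps the
+			 * already-mapped header buffer on this error path.
+			 */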
- dev_kfree_skb_any(skb); if (net_ratelimit()) netdev_err(ndev, "Tx DMA memory map failed\n"); goto dma_mapping_error; } - bdp->cbd_bufaddr = addr; - bdp->cbd_datlen = frag_len; - bdp->cbd_sc = status; + bdp->cbd_bufaddr = cpu_to_fec32(addr); + bdp->cbd_datlen = cpu_to_fec16(frag_len); + /* Make sure the updates to rest of the descriptor are + * performed before transferring ownership. + */ + wmb(); + bdp->cbd_sc = cpu_to_fec16(status); } return bdp; dma_mapping_error: - bdp = txq->cur_tx; + bdp = txq->bd.cur; for (i = 0; i < frag; i++) { - bdp = fec_enet_get_nextdesc(bdp, fep, queue); - dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, - bdp->cbd_datlen, DMA_TO_DEVICE); + bdp = fec_enet_get_nextdesc(bdp, &txq->bd); + dma_unmap_single(&fep->pdev->dev, fec32_to_cpu(bdp->cbd_bufaddr), + fec16_to_cpu(bdp->cbd_datlen), DMA_TO_DEVICE); } return ERR_PTR(-ENOMEM); } @@ -460,12 +417,11 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq, dma_addr_t addr; unsigned short status; unsigned short buflen; - unsigned short queue; unsigned int estatus = 0; unsigned int index; int entries_free; - entries_free = fec_enet_get_free_txdesc_num(fep, txq); + entries_free = fec_enet_get_free_txdesc_num(txq); if (entries_free < MAX_SKB_FRAGS + 1) { dev_kfree_skb_any(skb); if (net_ratelimit()) @@ -480,17 +436,16 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq, } /* Fill in a Tx ring entry */ - bdp = txq->cur_tx; + bdp = txq->bd.cur; last_bdp = bdp; - status = bdp->cbd_sc; + status = fec16_to_cpu(bdp->cbd_sc); status &= ~BD_ENET_TX_STATS; /* Set buffer length and buffer pointer */ bufaddr = skb->data; buflen = skb_headlen(skb); - queue = skb_get_queue_mapping(skb); - index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep); + index = fec_enet_get_bd_index(bdp, &txq->bd); if (((unsigned long) bufaddr) & fep->tx_align || fep->quirks & FEC_QUIRK_SWAP_FRAME) { memcpy(txq->tx_bounce[index], skb->data, buflen); @@ -511,8 +466,12 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq, if (nr_frags) { last_bdp = fec_enet_txq_submit_frag_skb(txq, skb, ndev); - if (IS_ERR(last_bdp)) + if (IS_ERR(last_bdp)) { + dma_unmap_single(&fep->pdev->dev, addr, + buflen, DMA_TO_DEVICE); + dev_kfree_skb_any(skb); return NETDEV_TX_OK; + } } else { status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST); if (fep->bufdesc_ex) { @@ -522,6 +481,8 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq, estatus |= BD_ENET_TX_TS; } } + bdp->cbd_bufaddr = cpu_to_fec32(addr); + bdp->cbd_datlen = cpu_to_fec16(buflen); if (fep->bufdesc_ex) { @@ -532,41 +493,43 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq, skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; if (fep->quirks & FEC_QUIRK_HAS_AVB) - estatus |= FEC_TX_BD_FTYPE(queue); + estatus |= FEC_TX_BD_FTYPE(txq->bd.qid); if (skb->ip_summed == CHECKSUM_PARTIAL) estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS; ebdp->cbd_bdu = 0; - ebdp->cbd_esc = estatus; + ebdp->cbd_esc = cpu_to_fec32(estatus); } - index = fec_enet_get_bd_index(txq->tx_bd_base, last_bdp, fep); + index = fec_enet_get_bd_index(last_bdp, &txq->bd); /* Save skb pointer */ txq->tx_skbuff[index] = skb; - bdp->cbd_datlen = buflen; - bdp->cbd_bufaddr = addr; + /* Make sure the updates to rest of the descriptor are performed before + * transferring ownership. + */ + wmb(); /* Send it on its way. Tell FEC it's ready, interrupt when done, * it's the last BD of the frame, and to put the CRC on the end. 
*/ status |= (BD_ENET_TX_READY | BD_ENET_TX_TC); - bdp->cbd_sc = status; + bdp->cbd_sc = cpu_to_fec16(status); /* If this was the last BD in the ring, start at the beginning again. */ - bdp = fec_enet_get_nextdesc(last_bdp, fep, queue); + bdp = fec_enet_get_nextdesc(last_bdp, &txq->bd); skb_tx_timestamp(skb); /* Make sure the update to bdp and tx_skbuff are performed before - * cur_tx. + * txq->bd.cur. */ wmb(); - txq->cur_tx = bdp; + txq->bd.cur = bdp; /* Trigger transmission start */ - writel(0, fep->hwp + FEC_X_DES_ACTIVE(queue)); + writel(0, txq->bd.reg_desc_active); return 0; } @@ -579,12 +542,11 @@ fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb, { struct fec_enet_private *fep = netdev_priv(ndev); struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc); - unsigned short queue = skb_get_queue_mapping(skb); unsigned short status; unsigned int estatus = 0; dma_addr_t addr; - status = bdp->cbd_sc; + status = fec16_to_cpu(bdp->cbd_sc); status &= ~BD_ENET_TX_STATS; status |= (BD_ENET_TX_TC | BD_ENET_TX_READY); @@ -606,16 +568,16 @@ fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb, return NETDEV_TX_BUSY; } - bdp->cbd_datlen = size; - bdp->cbd_bufaddr = addr; + bdp->cbd_datlen = cpu_to_fec16(size); + bdp->cbd_bufaddr = cpu_to_fec32(addr); if (fep->bufdesc_ex) { if (fep->quirks & FEC_QUIRK_HAS_AVB) - estatus |= FEC_TX_BD_FTYPE(queue); + estatus |= FEC_TX_BD_FTYPE(txq->bd.qid); if (skb->ip_summed == CHECKSUM_PARTIAL) estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS; ebdp->cbd_bdu = 0; - ebdp->cbd_esc = estatus; + ebdp->cbd_esc = cpu_to_fec32(estatus); } /* Handle the last BD specially */ @@ -624,10 +586,10 @@ fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb, if (is_last) { status |= BD_ENET_TX_INTR; if (fep->bufdesc_ex) - ebdp->cbd_esc |= BD_ENET_TX_INT; + ebdp->cbd_esc |= cpu_to_fec32(BD_ENET_TX_INT); } - bdp->cbd_sc = status; + bdp->cbd_sc = cpu_to_fec16(status); return 0; } @@ -640,13 +602,12 @@ fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq, struct fec_enet_private *fep = netdev_priv(ndev); int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc); - unsigned short queue = skb_get_queue_mapping(skb); void *bufaddr; unsigned long dmabuf; unsigned short status; unsigned int estatus = 0; - status = bdp->cbd_sc; + status = fec16_to_cpu(bdp->cbd_sc); status &= ~BD_ENET_TX_STATS; status |= (BD_ENET_TX_TC | BD_ENET_TX_READY); @@ -670,19 +631,19 @@ fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq, } } - bdp->cbd_bufaddr = dmabuf; - bdp->cbd_datlen = hdr_len; + bdp->cbd_bufaddr = cpu_to_fec32(dmabuf); + bdp->cbd_datlen = cpu_to_fec16(hdr_len); if (fep->bufdesc_ex) { if (fep->quirks & FEC_QUIRK_HAS_AVB) - estatus |= FEC_TX_BD_FTYPE(queue); + estatus |= FEC_TX_BD_FTYPE(txq->bd.qid); if (skb->ip_summed == CHECKSUM_PARTIAL) estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS; ebdp->cbd_bdu = 0; - ebdp->cbd_esc = estatus; + ebdp->cbd_esc = cpu_to_fec32(estatus); } - bdp->cbd_sc = status; + bdp->cbd_sc = cpu_to_fec16(status); return 0; } @@ -694,13 +655,12 @@ static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq, struct fec_enet_private *fep = netdev_priv(ndev); int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); int total_len, data_left; - struct bufdesc *bdp = txq->cur_tx; - unsigned short queue = skb_get_queue_mapping(skb); + struct bufdesc *bdp = txq->bd.cur; struct tso_t tso; unsigned int index = 0; int ret; 
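(Annotation; the TSO submit body continues below.) The submit paths in the hunks above all follow one publish order: fill cbd_bufaddr and cbd_datlen, issue wmb(), and only then write cbd_sc with the READY bit, because setting READY is what hands the descriptor to the MAC. A sketch of that pattern with hypothetical names; the fec16/fec32 endianness conversion is omitted, and a compiler barrier stands in for the kernel's wmb() (sufficient only on strongly ordered CPUs):

#include <stdint.h>

#define BD_READY 0x8000u               /* hypothetical ownership bit */

struct bd {                            /* hypothetical descriptor layout */
	uint16_t sc;                   /* status/control, polled by the MAC */
	uint16_t len;
	uint32_t addr;
};

static inline void wmb_standin(void)
{
	/* The driver uses wmb(); outside the kernel a compiler barrier
	 * is the closest portable stand-in.
	 */
	__asm__ __volatile__("" ::: "memory");
}

static void bd_publish(volatile struct bd *bdp, uint32_t addr,
		       uint16_t len, uint16_t flags)
{
	bdp->addr = addr;              /* fill every other field first */
	bdp->len = len;
	wmb_standin();                 /* make the fills visible... */
	bdp->sc = flags | BD_READY;    /* ...before ownership flips */
}

The rx completion path in a later hunk adds the mirror-image barrier for the same reason before re-arming a descriptor with BD_ENET_RX_EMPTY.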
- if (tso_count_descs(skb) >= fec_enet_get_free_txdesc_num(fep, txq)) { + if (tso_count_descs(skb) >= fec_enet_get_free_txdesc_num(txq)) { dev_kfree_skb_any(skb); if (net_ratelimit()) netdev_err(ndev, "NOT enough BD for TSO!\n"); @@ -720,7 +680,7 @@ static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq, while (total_len > 0) { char *hdr; - index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep); + index = fec_enet_get_bd_index(bdp, &txq->bd); data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len); total_len -= data_left; @@ -735,9 +695,8 @@ static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq, int size; size = min_t(int, tso.size, data_left); - bdp = fec_enet_get_nextdesc(bdp, fep, queue); - index = fec_enet_get_bd_index(txq->tx_bd_base, - bdp, fep); + bdp = fec_enet_get_nextdesc(bdp, &txq->bd); + index = fec_enet_get_bd_index(bdp, &txq->bd); ret = fec_enet_txq_put_data_tso(txq, skb, ndev, bdp, index, tso.data, size, @@ -750,22 +709,22 @@ static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq, tso_build_data(skb, &tso, size); } - bdp = fec_enet_get_nextdesc(bdp, fep, queue); + bdp = fec_enet_get_nextdesc(bdp, &txq->bd); } /* Save skb pointer */ txq->tx_skbuff[index] = skb; skb_tx_timestamp(skb); - txq->cur_tx = bdp; + txq->bd.cur = bdp; /* Trigger transmission start */ if (!(fep->quirks & FEC_QUIRK_ERR007885) || - !readl(fep->hwp + FEC_X_DES_ACTIVE(queue)) || - !readl(fep->hwp + FEC_X_DES_ACTIVE(queue)) || - !readl(fep->hwp + FEC_X_DES_ACTIVE(queue)) || - !readl(fep->hwp + FEC_X_DES_ACTIVE(queue))) - writel(0, fep->hwp + FEC_X_DES_ACTIVE(queue)); + !readl(txq->bd.reg_desc_active) || + !readl(txq->bd.reg_desc_active) || + !readl(txq->bd.reg_desc_active) || + !readl(txq->bd.reg_desc_active)) + writel(0, txq->bd.reg_desc_active); return 0; @@ -795,7 +754,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev) if (ret) return ret; - entries_free = fec_enet_get_free_txdesc_num(fep, txq); + entries_free = fec_enet_get_free_txdesc_num(txq); if (entries_free <= txq->tx_stop_threshold) netif_tx_stop_queue(nq); @@ -816,45 +775,45 @@ static void fec_enet_bd_init(struct net_device *dev) for (q = 0; q < fep->num_rx_queues; q++) { /* Initialize the receive buffer descriptors. */ rxq = fep->rx_queue[q]; - bdp = rxq->rx_bd_base; + bdp = rxq->bd.base; - for (i = 0; i < rxq->rx_ring_size; i++) { + for (i = 0; i < rxq->bd.ring_size; i++) { /* Initialize the BD for every fragment in the page. */ if (bdp->cbd_bufaddr) - bdp->cbd_sc = BD_ENET_RX_EMPTY; + bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY); else - bdp->cbd_sc = 0; - bdp = fec_enet_get_nextdesc(bdp, fep, q); + bdp->cbd_sc = cpu_to_fec16(0); + bdp = fec_enet_get_nextdesc(bdp, &rxq->bd); } /* Set the last buffer to wrap */ - bdp = fec_enet_get_prevdesc(bdp, fep, q); - bdp->cbd_sc |= BD_SC_WRAP; + bdp = fec_enet_get_prevdesc(bdp, &rxq->bd); + bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP); - rxq->cur_rx = rxq->rx_bd_base; + rxq->bd.cur = rxq->bd.base; } for (q = 0; q < fep->num_tx_queues; q++) { /* ...and the same for transmit */ txq = fep->tx_queue[q]; - bdp = txq->tx_bd_base; - txq->cur_tx = bdp; + bdp = txq->bd.base; + txq->bd.cur = bdp; - for (i = 0; i < txq->tx_ring_size; i++) { + for (i = 0; i < txq->bd.ring_size; i++) { /* Initialize the BD for every fragment in the page. 
*/ - bdp->cbd_sc = 0; + bdp->cbd_sc = cpu_to_fec16(0); if (txq->tx_skbuff[i]) { dev_kfree_skb_any(txq->tx_skbuff[i]); txq->tx_skbuff[i] = NULL; } - bdp->cbd_bufaddr = 0; - bdp = fec_enet_get_nextdesc(bdp, fep, q); + bdp->cbd_bufaddr = cpu_to_fec32(0); + bdp = fec_enet_get_nextdesc(bdp, &txq->bd); } /* Set the last buffer to wrap */ - bdp = fec_enet_get_prevdesc(bdp, fep, q); - bdp->cbd_sc |= BD_SC_WRAP; + bdp = fec_enet_get_prevdesc(bdp, &txq->bd); + bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP); txq->dirty_tx = bdp; } } @@ -865,7 +824,7 @@ static void fec_enet_active_rxring(struct net_device *ndev) int i; for (i = 0; i < fep->num_rx_queues; i++) - writel(0, fep->hwp + FEC_R_DES_ACTIVE(i)); + writel(0, fep->rx_queue[i]->bd.reg_desc_active); } static void fec_enet_enable_ring(struct net_device *ndev) @@ -877,7 +836,7 @@ static void fec_enet_enable_ring(struct net_device *ndev) for (i = 0; i < fep->num_rx_queues; i++) { rxq = fep->rx_queue[i]; - writel(rxq->bd_dma, fep->hwp + FEC_R_DES_START(i)); + writel(rxq->bd.dma, fep->hwp + FEC_R_DES_START(i)); writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE(i)); /* enable DMA1/2 */ @@ -888,7 +847,7 @@ static void fec_enet_enable_ring(struct net_device *ndev) for (i = 0; i < fep->num_tx_queues; i++) { txq = fep->tx_queue[i]; - writel(txq->bd_dma, fep->hwp + FEC_X_DES_START(i)); + writel(txq->bd.dma, fep->hwp + FEC_X_DES_START(i)); /* enable DMA1/2 */ if (i) @@ -906,7 +865,7 @@ static void fec_enet_reset_skb(struct net_device *ndev) for (i = 0; i < fep->num_tx_queues; i++) { txq = fep->tx_queue[i]; - for (j = 0; j < txq->tx_ring_size; j++) { + for (j = 0; j < txq->bd.ring_size; j++) { if (txq->tx_skbuff[j]) { dev_kfree_skb_any(txq->tx_skbuff[j]); txq->tx_skbuff[j] = NULL; @@ -946,8 +905,10 @@ fec_restart(struct net_device *ndev) */ if (fep->quirks & FEC_QUIRK_ENET_MAC) { memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN); - writel(cpu_to_be32(temp_mac[0]), fep->hwp + FEC_ADDR_LOW); - writel(cpu_to_be32(temp_mac[1]), fep->hwp + FEC_ADDR_HIGH); + writel((__force u32)cpu_to_be32(temp_mac[0]), + fep->hwp + FEC_ADDR_LOW); + writel((__force u32)cpu_to_be32(temp_mac[1]), + fep->hwp + FEC_ADDR_HIGH); } /* Clear any outstanding interrupt. 
*/ @@ -983,6 +944,7 @@ fec_restart(struct net_device *ndev) val &= ~FEC_RACC_OPTIONS; writel(val, fep->hwp + FEC_RACC); } + writel(PKT_MAXBUF_SIZE, fep->hwp + FEC_FTRL); #endif /* @@ -1216,25 +1178,27 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id) bdp = txq->dirty_tx; /* get next bdp of dirty_tx */ - bdp = fec_enet_get_nextdesc(bdp, fep, queue_id); + bdp = fec_enet_get_nextdesc(bdp, &txq->bd); - while (bdp != READ_ONCE(txq->cur_tx)) { - /* Order the load of cur_tx and cbd_sc */ + while (bdp != READ_ONCE(txq->bd.cur)) { + /* Order the load of bd.cur and cbd_sc */ rmb(); - status = READ_ONCE(bdp->cbd_sc); + status = fec16_to_cpu(READ_ONCE(bdp->cbd_sc)); if (status & BD_ENET_TX_READY) break; - index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep); + index = fec_enet_get_bd_index(bdp, &txq->bd); skb = txq->tx_skbuff[index]; txq->tx_skbuff[index] = NULL; - if (!IS_TSO_HEADER(txq, bdp->cbd_bufaddr)) - dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, - bdp->cbd_datlen, DMA_TO_DEVICE); - bdp->cbd_bufaddr = 0; + if (!IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr))) + dma_unmap_single(&fep->pdev->dev, + fec32_to_cpu(bdp->cbd_bufaddr), + fec16_to_cpu(bdp->cbd_datlen), + DMA_TO_DEVICE); + bdp->cbd_bufaddr = cpu_to_fec32(0); if (!skb) { - bdp = fec_enet_get_nextdesc(bdp, fep, queue_id); + bdp = fec_enet_get_nextdesc(bdp, &txq->bd); continue; } @@ -1263,7 +1227,7 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id) struct skb_shared_hwtstamps shhwtstamps; struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; - fec_enet_hwtstamp(fep, ebdp->ts, &shhwtstamps); + fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts), &shhwtstamps); skb_tstamp_tx(skb, &shhwtstamps); } @@ -1283,21 +1247,21 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id) txq->dirty_tx = bdp; /* Update pointer to next buffer descriptor to be transmitted */ - bdp = fec_enet_get_nextdesc(bdp, fep, queue_id); + bdp = fec_enet_get_nextdesc(bdp, &txq->bd); /* Since we have freed up a buffer, the ring is no longer full */ if (netif_queue_stopped(ndev)) { - entries_free = fec_enet_get_free_txdesc_num(fep, txq); + entries_free = fec_enet_get_free_txdesc_num(txq); if (entries_free >= txq->tx_wake_threshold) netif_tx_wake_queue(nq); } } /* ERR006538: Keep the transmitter going */ - if (bdp != txq->cur_tx && - readl(fep->hwp + FEC_X_DES_ACTIVE(queue_id)) == 0) - writel(0, fep->hwp + FEC_X_DES_ACTIVE(queue_id)); + if (bdp != txq->bd.cur && + readl(txq->bd.reg_desc_active) == 0) + writel(0, txq->bd.reg_desc_active); } static void @@ -1323,10 +1287,8 @@ fec_enet_new_rxbdp(struct net_device *ndev, struct bufdesc *bdp, struct sk_buff if (off) skb_reserve(skb, fep->rx_align + 1 - off); - bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, skb->data, - FEC_ENET_RX_FRSIZE - fep->rx_align, - DMA_FROM_DEVICE); - if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) { + bdp->cbd_bufaddr = cpu_to_fec32(dma_map_single(&fep->pdev->dev, skb->data, FEC_ENET_RX_FRSIZE - fep->rx_align, DMA_FROM_DEVICE)); + if (dma_mapping_error(&fep->pdev->dev, fec32_to_cpu(bdp->cbd_bufaddr))) { if (net_ratelimit()) netdev_err(ndev, "Rx DMA memory map failed\n"); return -ENOMEM; @@ -1348,7 +1310,8 @@ static bool fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb, if (!new_skb) return false; - dma_sync_single_for_cpu(&fep->pdev->dev, bdp->cbd_bufaddr, + dma_sync_single_for_cpu(&fep->pdev->dev, + fec32_to_cpu(bdp->cbd_bufaddr), FEC_ENET_RX_FRSIZE - fep->rx_align, DMA_FROM_DEVICE); if (!swap) @@ -1360,7 +1323,7 @@ static bool 
fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb, return true; } -/* During a receive, the cur_rx points to the current incoming buffer. +/* During a receive, the bd_rx.cur points to the current incoming buffer. * When we update through the ring, if the next incoming buffer has * not been given to the system, we just set the empty indicator, * effectively tossing the packet. @@ -1393,54 +1356,48 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id) /* First, grab all of the stats for the incoming packet. * These get messed up if we get called due to a busy condition. */ - bdp = rxq->cur_rx; + bdp = rxq->bd.cur; - while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) { + while (!((status = fec16_to_cpu(bdp->cbd_sc)) & BD_ENET_RX_EMPTY)) { if (pkt_received >= budget) break; pkt_received++; - /* Since we have allocated space to hold a complete frame, - * the last indicator should be set. - */ - if ((status & BD_ENET_RX_LAST) == 0) - netdev_err(ndev, "rcv is not +last\n"); - writel(FEC_ENET_RXF, fep->hwp + FEC_IEVENT); /* Check for errors. */ + status ^= BD_ENET_RX_LAST; if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO | - BD_ENET_RX_CR | BD_ENET_RX_OV)) { + BD_ENET_RX_CR | BD_ENET_RX_OV | BD_ENET_RX_LAST | + BD_ENET_RX_CL)) { ndev->stats.rx_errors++; - if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH)) { + if (status & BD_ENET_RX_OV) { + /* FIFO overrun */ + ndev->stats.rx_fifo_errors++; + goto rx_processing_done; + } + if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH + | BD_ENET_RX_LAST)) { /* Frame too long or too short. */ ndev->stats.rx_length_errors++; + if (status & BD_ENET_RX_LAST) + netdev_err(ndev, "rcv is not +last\n"); } - if (status & BD_ENET_RX_NO) /* Frame alignment */ - ndev->stats.rx_frame_errors++; if (status & BD_ENET_RX_CR) /* CRC Error */ ndev->stats.rx_crc_errors++; - if (status & BD_ENET_RX_OV) /* FIFO overrun */ - ndev->stats.rx_fifo_errors++; - } - - /* Report late collisions as a frame error. - * On this error, the BD is closed, but we don't know what we - * have in the buffer. So, just drop this frame on the floor. - */ - if (status & BD_ENET_RX_CL) { - ndev->stats.rx_errors++; - ndev->stats.rx_frame_errors++; + /* Report late collisions as a frame error. */ + if (status & (BD_ENET_RX_NO | BD_ENET_RX_CL)) + ndev->stats.rx_frame_errors++; goto rx_processing_done; } /* Process the incoming frame. 
*/ ndev->stats.rx_packets++; - pkt_len = bdp->cbd_datlen; + pkt_len = fec16_to_cpu(bdp->cbd_datlen); ndev->stats.rx_bytes += pkt_len; - index = fec_enet_get_bd_index(rxq->rx_bd_base, bdp, fep); + index = fec_enet_get_bd_index(bdp, &rxq->bd); skb = rxq->rx_skbuff[index]; /* The packet length includes FCS, but we don't want to @@ -1455,7 +1412,8 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id) ndev->stats.rx_dropped++; goto rx_processing_done; } - dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, + dma_unmap_single(&fep->pdev->dev, + fec32_to_cpu(bdp->cbd_bufaddr), FEC_ENET_RX_FRSIZE - fep->rx_align, DMA_FROM_DEVICE); } @@ -1474,7 +1432,8 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id) /* If this is a VLAN packet remove the VLAN Tag */ vlan_packet_rcvd = false; if ((ndev->features & NETIF_F_HW_VLAN_CTAG_RX) && - fep->bufdesc_ex && (ebdp->cbd_esc & BD_ENET_RX_VLAN)) { + fep->bufdesc_ex && + (ebdp->cbd_esc & cpu_to_fec32(BD_ENET_RX_VLAN))) { /* Push and remove the vlan tag */ struct vlan_hdr *vlan_header = (struct vlan_hdr *) (data + ETH_HLEN); @@ -1490,12 +1449,12 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id) /* Get receive timestamp from the skb */ if (fep->hwts_rx_en && fep->bufdesc_ex) - fec_enet_hwtstamp(fep, ebdp->ts, + fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts), skb_hwtstamps(skb)); if (fep->bufdesc_ex && (fep->csum_flags & FLAG_RX_CSUM_ENABLED)) { - if (!(ebdp->cbd_esc & FLAG_RX_CSUM_ERROR)) { + if (!(ebdp->cbd_esc & cpu_to_fec32(FLAG_RX_CSUM_ERROR))) { /* don't check it */ skb->ip_summed = CHECKSUM_UNNECESSARY; } else { @@ -1512,7 +1471,8 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id) napi_gro_receive(&fep->napi, skb); if (is_copybreak) { - dma_sync_single_for_device(&fep->pdev->dev, bdp->cbd_bufaddr, + dma_sync_single_for_device(&fep->pdev->dev, + fec32_to_cpu(bdp->cbd_bufaddr), FEC_ENET_RX_FRSIZE - fep->rx_align, DMA_FROM_DEVICE); } else { @@ -1526,26 +1486,30 @@ rx_processing_done: /* Mark the buffer empty */ status |= BD_ENET_RX_EMPTY; - bdp->cbd_sc = status; if (fep->bufdesc_ex) { struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; - ebdp->cbd_esc = BD_ENET_RX_INT; + ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT); ebdp->cbd_prot = 0; ebdp->cbd_bdu = 0; } + /* Make sure the updates to rest of the descriptor are + * performed before transferring ownership. + */ + wmb(); + bdp->cbd_sc = cpu_to_fec16(status); /* Update BD pointer to next entry */ - bdp = fec_enet_get_nextdesc(bdp, fep, queue_id); + bdp = fec_enet_get_nextdesc(bdp, &rxq->bd); /* Doing this here will keep the FEC running while we process * incoming frames. On a heavily loaded network, we should be * able to keep up at the expense of system resources. 
*/ - writel(0, fep->hwp + FEC_R_DES_ACTIVE(queue_id)); + writel(0, rxq->bd.reg_desc_active); } - rxq->cur_rx = bdp; + rxq->bd.cur = bdp; return pkt_received; } @@ -1604,7 +1568,7 @@ fec_enet_interrupt(int irq, void *dev_id) if (napi_schedule_prep(&fep->napi)) { /* Disable the NAPI interrupts */ - writel(FEC_ENET_MII, fep->hwp + FEC_IMASK); + writel(FEC_NAPI_IMASK, fep->hwp + FEC_IMASK); __napi_schedule(&fep->napi); } } @@ -1926,11 +1890,7 @@ static int fec_enet_mii_probe(struct net_device *ndev) } else { /* check for attached phy */ for (phy_id = 0; (phy_id < PHY_MAX_ADDR); phy_id++) { - if ((fep->mii_bus->phy_mask & (1 << phy_id))) - continue; - if (fep->mii_bus->phy_map[phy_id] == NULL) - continue; - if (fep->mii_bus->phy_map[phy_id]->phy_id == 0) + if (!mdiobus_is_registered_device(fep->mii_bus, phy_id)) continue; if (dev_id--) continue; @@ -1972,9 +1932,7 @@ static int fec_enet_mii_probe(struct net_device *ndev) fep->link = 0; fep->full_duplex = 0; - netdev_info(ndev, "Freescale FEC PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n", - fep->phy_dev->drv->name, dev_name(&fep->phy_dev->dev), - fep->phy_dev->irq); + phy_attached_info(phy_dev); return 0; } @@ -1985,7 +1943,7 @@ static int fec_enet_mii_init(struct platform_device *pdev) struct net_device *ndev = platform_get_drvdata(pdev); struct fec_enet_private *fep = netdev_priv(ndev); struct device_node *node; - int err = -ENXIO, i; + int err = -ENXIO; u32 mii_speed, holdtime; /* @@ -2067,15 +2025,6 @@ static int fec_enet_mii_init(struct platform_device *pdev) fep->mii_bus->priv = fep; fep->mii_bus->parent = &pdev->dev; - fep->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); - if (!fep->mii_bus->irq) { - err = -ENOMEM; - goto err_out_free_mdiobus; - } - - for (i = 0; i < PHY_MAX_ADDR; i++) - fep->mii_bus->irq[i] = PHY_POLL; - node = of_get_child_by_name(pdev->dev.of_node, "mdio"); if (node) { err = of_mdiobus_register(fep->mii_bus, node); @@ -2085,7 +2034,7 @@ static int fec_enet_mii_init(struct platform_device *pdev) } if (err) - goto err_out_free_mdio_irq; + goto err_out_free_mdiobus; mii_cnt++; @@ -2095,8 +2044,6 @@ static int fec_enet_mii_init(struct platform_device *pdev) return 0; -err_out_free_mdio_irq: - kfree(fep->mii_bus->irq); err_out_free_mdiobus: mdiobus_free(fep->mii_bus); err_out: @@ -2107,7 +2054,6 @@ static void fec_enet_mii_remove(struct fec_enet_private *fep) { if (--mii_cnt == 0) { mdiobus_unregister(fep->mii_bus); - kfree(fep->mii_bus->irq); mdiobus_free(fep->mii_bus); } } @@ -2162,8 +2108,7 @@ static int fec_enet_get_regs_len(struct net_device *ndev) /* List of registers that can be safety be read to dump them with ethtool */ #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ - defined(CONFIG_M520x) || defined(CONFIG_M532x) || \ - defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28) + defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) static u32 fec_enet_register_offset[] = { FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0, FEC_ECNTRL, FEC_MII_DATA, FEC_MII_SPEED, FEC_MIB_CTRLSTAT, FEC_R_CNTRL, @@ -2673,25 +2618,25 @@ static void fec_enet_free_buffers(struct net_device *ndev) for (q = 0; q < fep->num_rx_queues; q++) { rxq = fep->rx_queue[q]; - bdp = rxq->rx_bd_base; - for (i = 0; i < rxq->rx_ring_size; i++) { + bdp = rxq->bd.base; + for (i = 0; i < rxq->bd.ring_size; i++) { skb = rxq->rx_skbuff[i]; rxq->rx_skbuff[i] = NULL; if (skb) { dma_unmap_single(&fep->pdev->dev, - bdp->cbd_bufaddr, + fec32_to_cpu(bdp->cbd_bufaddr), FEC_ENET_RX_FRSIZE 
- fep->rx_align, DMA_FROM_DEVICE); dev_kfree_skb(skb); } - bdp = fec_enet_get_nextdesc(bdp, fep, q); + bdp = fec_enet_get_nextdesc(bdp, &rxq->bd); } } for (q = 0; q < fep->num_tx_queues; q++) { txq = fep->tx_queue[q]; - bdp = txq->tx_bd_base; - for (i = 0; i < txq->tx_ring_size; i++) { + bdp = txq->bd.base; + for (i = 0; i < txq->bd.ring_size; i++) { kfree(txq->tx_bounce[i]); txq->tx_bounce[i] = NULL; skb = txq->tx_skbuff[i]; @@ -2711,7 +2656,7 @@ static void fec_enet_free_queue(struct net_device *ndev) if (fep->tx_queue[i] && fep->tx_queue[i]->tso_hdrs) { txq = fep->tx_queue[i]; dma_free_coherent(NULL, - txq->tx_ring_size * TSO_HEADER_SIZE, + txq->bd.ring_size * TSO_HEADER_SIZE, txq->tso_hdrs, txq->tso_hdrs_dma); } @@ -2737,15 +2682,15 @@ static int fec_enet_alloc_queue(struct net_device *ndev) } fep->tx_queue[i] = txq; - txq->tx_ring_size = TX_RING_SIZE; - fep->total_tx_ring_size += fep->tx_queue[i]->tx_ring_size; + txq->bd.ring_size = TX_RING_SIZE; + fep->total_tx_ring_size += fep->tx_queue[i]->bd.ring_size; txq->tx_stop_threshold = FEC_MAX_SKB_DESCS; txq->tx_wake_threshold = - (txq->tx_ring_size - txq->tx_stop_threshold) / 2; + (txq->bd.ring_size - txq->tx_stop_threshold) / 2; txq->tso_hdrs = dma_alloc_coherent(NULL, - txq->tx_ring_size * TSO_HEADER_SIZE, + txq->bd.ring_size * TSO_HEADER_SIZE, &txq->tso_hdrs_dma, GFP_KERNEL); if (!txq->tso_hdrs) { @@ -2762,8 +2707,8 @@ static int fec_enet_alloc_queue(struct net_device *ndev) goto alloc_failed; } - fep->rx_queue[i]->rx_ring_size = RX_RING_SIZE; - fep->total_rx_ring_size += fep->rx_queue[i]->rx_ring_size; + fep->rx_queue[i]->bd.ring_size = RX_RING_SIZE; + fep->total_rx_ring_size += fep->rx_queue[i]->bd.ring_size; } return ret; @@ -2782,8 +2727,8 @@ fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue) struct fec_enet_priv_rx_q *rxq; rxq = fep->rx_queue[queue]; - bdp = rxq->rx_bd_base; - for (i = 0; i < rxq->rx_ring_size; i++) { + bdp = rxq->bd.base; + for (i = 0; i < rxq->bd.ring_size; i++) { skb = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE); if (!skb) goto err_alloc; @@ -2794,19 +2739,19 @@ fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue) } rxq->rx_skbuff[i] = skb; - bdp->cbd_sc = BD_ENET_RX_EMPTY; + bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY); if (fep->bufdesc_ex) { struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; - ebdp->cbd_esc = BD_ENET_RX_INT; + ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT); } - bdp = fec_enet_get_nextdesc(bdp, fep, queue); + bdp = fec_enet_get_nextdesc(bdp, &rxq->bd); } /* Set the last buffer to wrap. */ - bdp = fec_enet_get_prevdesc(bdp, fep, queue); - bdp->cbd_sc |= BD_SC_WRAP; + bdp = fec_enet_get_prevdesc(bdp, &rxq->bd); + bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP); return 0; err_alloc: @@ -2823,26 +2768,26 @@ fec_enet_alloc_txq_buffers(struct net_device *ndev, unsigned int queue) struct fec_enet_priv_tx_q *txq; txq = fep->tx_queue[queue]; - bdp = txq->tx_bd_base; - for (i = 0; i < txq->tx_ring_size; i++) { + bdp = txq->bd.base; + for (i = 0; i < txq->bd.ring_size; i++) { txq->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL); if (!txq->tx_bounce[i]) goto err_alloc; - bdp->cbd_sc = 0; - bdp->cbd_bufaddr = 0; + bdp->cbd_sc = cpu_to_fec16(0); + bdp->cbd_bufaddr = cpu_to_fec32(0); if (fep->bufdesc_ex) { struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; - ebdp->cbd_esc = BD_ENET_TX_INT; + ebdp->cbd_esc = cpu_to_fec32(BD_ENET_TX_INT); } - bdp = fec_enet_get_nextdesc(bdp, fep, queue); + bdp = fec_enet_get_nextdesc(bdp, &txq->bd); } /* Set the last buffer to wrap. 
*/ - bdp = fec_enet_get_prevdesc(bdp, fep, queue); - bdp->cbd_sc |= BD_SC_WRAP; + bdp = fec_enet_get_prevdesc(bdp, &txq->bd); + bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP); return 0; @@ -3125,6 +3070,14 @@ static const struct net_device_ops fec_netdev_ops = { .ndo_set_features = fec_set_features, }; +static const unsigned short offset_des_active_rxq[] = { + FEC_R_DES_ACTIVE_0, FEC_R_DES_ACTIVE_1, FEC_R_DES_ACTIVE_2 +}; + +static const unsigned short offset_des_active_txq[] = { + FEC_X_DES_ACTIVE_0, FEC_X_DES_ACTIVE_1, FEC_X_DES_ACTIVE_2 +}; + /* * XXX: We need to clean up on failure exits here. * @@ -3132,13 +3085,15 @@ static const struct net_device_ops fec_netdev_ops = { static int fec_enet_init(struct net_device *ndev) { struct fec_enet_private *fep = netdev_priv(ndev); - struct fec_enet_priv_tx_q *txq; - struct fec_enet_priv_rx_q *rxq; struct bufdesc *cbd_base; dma_addr_t bd_dma; int bd_size; unsigned int i; + unsigned dsize = fep->bufdesc_ex ? sizeof(struct bufdesc_ex) : + sizeof(struct bufdesc); + unsigned dsize_log2 = __fls(dsize); + WARN_ON(dsize != (1 << dsize_log2)); #if defined(CONFIG_ARM) fep->rx_align = 0xf; fep->tx_align = 0xf; @@ -3149,12 +3104,7 @@ static int fec_enet_init(struct net_device *ndev) fec_enet_alloc_queue(ndev); - if (fep->bufdesc_ex) - fep->bufdesc_size = sizeof(struct bufdesc_ex); - else - fep->bufdesc_size = sizeof(struct bufdesc); - bd_size = (fep->total_tx_ring_size + fep->total_rx_ring_size) * - fep->bufdesc_size; + bd_size = (fep->total_tx_ring_size + fep->total_rx_ring_size) * dsize; /* Allocate memory for buffer descriptors. */ cbd_base = dmam_alloc_coherent(&fep->pdev->dev, bd_size, &bd_dma, @@ -3172,33 +3122,35 @@ static int fec_enet_init(struct net_device *ndev) /* Set receive and transmit descriptor base. */ for (i = 0; i < fep->num_rx_queues; i++) { - rxq = fep->rx_queue[i]; - rxq->index = i; - rxq->rx_bd_base = (struct bufdesc *)cbd_base; - rxq->bd_dma = bd_dma; - if (fep->bufdesc_ex) { - bd_dma += sizeof(struct bufdesc_ex) * rxq->rx_ring_size; - cbd_base = (struct bufdesc *) - (((struct bufdesc_ex *)cbd_base) + rxq->rx_ring_size); - } else { - bd_dma += sizeof(struct bufdesc) * rxq->rx_ring_size; - cbd_base += rxq->rx_ring_size; - } + struct fec_enet_priv_rx_q *rxq = fep->rx_queue[i]; + unsigned size = dsize * rxq->bd.ring_size; + + rxq->bd.qid = i; + rxq->bd.base = cbd_base; + rxq->bd.cur = cbd_base; + rxq->bd.dma = bd_dma; + rxq->bd.dsize = dsize; + rxq->bd.dsize_log2 = dsize_log2; + rxq->bd.reg_desc_active = fep->hwp + offset_des_active_rxq[i]; + bd_dma += size; + cbd_base = (struct bufdesc *)(((void *)cbd_base) + size); + rxq->bd.last = (struct bufdesc *)(((void *)cbd_base) - dsize); } for (i = 0; i < fep->num_tx_queues; i++) { - txq = fep->tx_queue[i]; - txq->index = i; - txq->tx_bd_base = (struct bufdesc *)cbd_base; - txq->bd_dma = bd_dma; - if (fep->bufdesc_ex) { - bd_dma += sizeof(struct bufdesc_ex) * txq->tx_ring_size; - cbd_base = (struct bufdesc *) - (((struct bufdesc_ex *)cbd_base) + txq->tx_ring_size); - } else { - bd_dma += sizeof(struct bufdesc) * txq->tx_ring_size; - cbd_base += txq->tx_ring_size; - } + struct fec_enet_priv_tx_q *txq = fep->tx_queue[i]; + unsigned size = dsize * txq->bd.ring_size; + + txq->bd.qid = i; + txq->bd.base = cbd_base; + txq->bd.cur = cbd_base; + txq->bd.dma = bd_dma; + txq->bd.dsize = dsize; + txq->bd.dsize_log2 = dsize_log2; + txq->bd.reg_desc_active = fep->hwp + offset_des_active_txq[i]; + bd_dma += size; + cbd_base = (struct bufdesc *)(((void *)cbd_base) + size); + txq->bd.last = (struct bufdesc 
*)(((void *)cbd_base) - dsize); } @@ -3239,6 +3191,7 @@ static int fec_enet_init(struct net_device *ndev) static void fec_reset_phy(struct platform_device *pdev) { int err, phy_reset; + bool active_low = false; int msec = 1; struct device_node *np = pdev->dev.of_node; @@ -3254,14 +3207,17 @@ static void fec_reset_phy(struct platform_device *pdev) if (!gpio_is_valid(phy_reset)) return; + active_low = of_property_read_bool(np, "phy-reset-active-low"); + err = devm_gpio_request_one(&pdev->dev, phy_reset, - GPIOF_OUT_INIT_LOW, "phy-reset"); + active_low ? GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW, + "phy-reset"); if (err) { dev_err(&pdev->dev, "failed to get phy-reset-gpios: %d\n", err); return; } msleep(msec); - gpio_set_value_cansleep(phy_reset, 1); + gpio_set_value_cansleep(phy_reset, !active_low); } #else /* CONFIG_OF */ static void fec_reset_phy(struct platform_device *pdev) diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c b/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c index 1e647beaf989..b5497e308302 100644 --- a/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c +++ b/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c @@ -22,7 +22,6 @@ struct mpc52xx_fec_mdio_priv { struct mpc52xx_fec __iomem *regs; - int mdio_irqs[PHY_MAX_ADDR]; }; static int mpc52xx_fec_mdio_transfer(struct mii_bus *bus, int phy_id, @@ -83,9 +82,6 @@ static int mpc52xx_fec_mdio_probe(struct platform_device *of) bus->read = mpc52xx_fec_mdio_read; bus->write = mpc52xx_fec_mdio_write; - /* setup irqs */ - bus->irq = priv->mdio_irqs; - /* setup registers */ err = of_address_to_resource(np, 0, &res); if (err) diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.c b/drivers/net/ethernet/freescale/fman/fman_dtsec.c index d78e2ba73ffa..7c92eb854925 100644 --- a/drivers/net/ethernet/freescale/fman/fman_dtsec.c +++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.c @@ -934,7 +934,7 @@ int dtsec_set_tx_pause_frames(struct fman_mac *dtsec, /* FM_BAD_TX_TS_IN_B_2_B_ERRATA_DTSEC_A003 Errata workaround */ if (dtsec->fm_rev_info.major == 2) - if (pause_time < 0 && pause_time <= 320) { + if (pause_time <= 320) { pr_warn("pause-time: %d illegal.Should be > 320\n", pause_time); return -EINVAL; @@ -1295,7 +1295,7 @@ int dtsec_init(struct fman_mac *dtsec) err = init(dtsec->regs, dtsec_drv_param, dtsec->phy_if, dtsec->max_speed, (u8 *)eth_addr, dtsec->exceptions, - dtsec->tbiphy->addr); + dtsec->tbiphy->mdio.addr); if (err) { free_init_resources(dtsec); pr_err("DTSEC version doesn't support this i/f mode\n"); @@ -1434,11 +1434,10 @@ struct fman_mac *dtsec_config(struct fman_mac_params *params) dtsec->tbiphy = of_phy_find_device(params->internal_phy_node); if (!dtsec->tbiphy) { pr_err("of_phy_find_device (TBI PHY) failed\n"); - put_device(&dtsec->tbiphy->dev); goto err_dtsec_drv_param; } - put_device(&dtsec->tbiphy->dev); + put_device(&dtsec->tbiphy->mdio.dev); /* Save FMan revision */ fman_get_revision(dtsec->fm, &dtsec->fm_rev_info); diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c index 58bb72071c14..45e98fd8b79e 100644 --- a/drivers/net/ethernet/freescale/fman/fman_memac.c +++ b/drivers/net/ethernet/freescale/fman/fman_memac.c @@ -1054,15 +1054,15 @@ int memac_init(struct fman_mac *memac) * register address space and access each one of 4 * ports inside QSGMII. 
*/ - phy_addr = memac->pcsphy->addr; + phy_addr = memac->pcsphy->mdio.addr; qsmgii_phy_addr = (u8)((phy_addr << 2) | i); - memac->pcsphy->addr = qsmgii_phy_addr; + memac->pcsphy->mdio.addr = qsmgii_phy_addr; if (memac->basex_if) setup_sgmii_internal_phy_base_x(memac); else setup_sgmii_internal_phy(memac, fixed_link); - memac->pcsphy->addr = phy_addr; + memac->pcsphy->mdio.addr = phy_addr; } } diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c index 743a393ba657..e33d9d24c1db 100644 --- a/drivers/net/ethernet/freescale/fman/mac.c +++ b/drivers/net/ethernet/freescale/fman/mac.c @@ -961,7 +961,6 @@ _return_of_node_put: of_node_put(dev_node); _return_dev_set_drvdata: kfree(priv->fixed_link); - kfree(priv); dev_set_drvdata(dev, NULL); _return: return err; diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c b/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c index 52e0091b4fb2..1ba359f17ec6 100644 --- a/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c +++ b/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c @@ -552,7 +552,7 @@ static void tx_restart(struct net_device *dev) cbd_t __iomem *prev_bd; cbd_t __iomem *last_tx_bd; - last_tx_bd = fep->tx_bd_base + ((fpi->tx_ring - 1) * sizeof(cbd_t)); + last_tx_bd = fep->tx_bd_base + (fpi->tx_ring - 1); /* get the current bd held in TBPTR and scan back from this point */ recheck_bd = curr_tbptr = (cbd_t __iomem *) diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c index 016743e355de..bade2f8f9b5c 100644 --- a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c +++ b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c @@ -254,7 +254,7 @@ static void restart(struct net_device *dev) int r; u32 addrhi, addrlo; - struct mii_bus* mii = fep->phydev->bus; + struct mii_bus *mii = fep->phydev->mdio.bus; struct fec_info* fec_inf = mii->priv; r = whack_reset(fep->fec.fecp); @@ -363,7 +363,7 @@ static void stop(struct net_device *dev) const struct fs_platform_info *fpi = fep->fpi; struct fec __iomem *fecp = fep->fec.fecp; - struct fec_info* feci= fep->phydev->bus->priv; + struct fec_info *feci = fep->phydev->mdio.bus->priv; int i; diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c index 68a428de0bc0..1f015edcca22 100644 --- a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c +++ b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c @@ -172,23 +172,16 @@ static int fs_enet_mdio_probe(struct platform_device *ofdev) goto out_free_bus; new_bus->phy_mask = ~0; - new_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); - if (!new_bus->irq) { - ret = -ENOMEM; - goto out_unmap_regs; - } new_bus->parent = &ofdev->dev; platform_set_drvdata(ofdev, new_bus); ret = of_mdiobus_register(new_bus, ofdev->dev.of_node); if (ret) - goto out_free_irqs; + goto out_unmap_regs; return 0; -out_free_irqs: - kfree(new_bus->irq); out_unmap_regs: iounmap(bitbang->dir); out_free_bus: @@ -205,7 +198,6 @@ static int fs_enet_mdio_remove(struct platform_device *ofdev) struct bb_info *bitbang = bus->priv; mdiobus_unregister(bus); - kfree(bus->irq); free_mdio_bitbang(bus); iounmap(bitbang->dir); kfree(bitbang); diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c index 2be383e6d258..a89267b94352 100644 --- a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c +++ b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c @@ -166,23 +166,16 @@ static int 
fs_enet_mdio_probe(struct platform_device *ofdev) clrsetbits_be32(&fec->fecp->fec_mii_speed, 0x7E, fec->mii_speed); new_bus->phy_mask = ~0; - new_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); - if (!new_bus->irq) { - ret = -ENOMEM; - goto out_unmap_regs; - } new_bus->parent = &ofdev->dev; platform_set_drvdata(ofdev, new_bus); ret = of_mdiobus_register(new_bus, ofdev->dev.of_node); if (ret) - goto out_free_irqs; + goto out_unmap_regs; return 0; -out_free_irqs: - kfree(new_bus->irq); out_unmap_regs: iounmap(fec->fecp); out_res: @@ -200,7 +193,6 @@ static int fs_enet_mdio_remove(struct platform_device *ofdev) struct fec_info *fec = bus->priv; mdiobus_unregister(bus); - kfree(bus->irq); iounmap(fec->fecp); kfree(fec); mdiobus_free(bus); diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c index 40071dad1c57..f3c63dce1e30 100644 --- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c +++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c @@ -29,7 +29,7 @@ #include <asm/io.h> #if IS_ENABLED(CONFIG_UCC_GETH) -#include <asm/ucc.h> /* for ucc_set_qe_mux_mii_mng() */ +#include <soc/fsl/qe/ucc.h> #endif #include "gianfar.h" @@ -69,7 +69,6 @@ struct fsl_pq_mdio { struct fsl_pq_mdio_priv { void __iomem *map; struct fsl_pq_mii __iomem *regs; - int irqs[PHY_MAX_ADDR]; }; /* @@ -401,7 +400,6 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev) new_bus->read = &fsl_pq_mdio_read; new_bus->write = &fsl_pq_mdio_write; new_bus->reset = &fsl_pq_mdio_reset; - new_bus->irq = priv->irqs; err = of_address_to_resource(np, 0, &res); if (err < 0) { diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index 4e394f75261e..2aa7b401cc3b 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -1834,7 +1834,7 @@ static void gfar_configure_serdes(struct net_device *dev) * several seconds for it to come back. */ if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS) { - put_device(&tbiphy->dev); + put_device(&tbiphy->mdio.dev); return; } @@ -1849,7 +1849,7 @@ static void gfar_configure_serdes(struct net_device *dev) BMCR_ANENABLE | BMCR_ANRESTART | BMCR_FULLDPLX | BMCR_SPEED1000); - put_device(&tbiphy->dev); + put_device(&tbiphy->mdio.dev); } static int __gfar_is_rx_idle(struct gfar_private *priv) diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c index 650f7888e32b..5bf1ade28315 100644 --- a/drivers/net/ethernet/freescale/ucc_geth.c +++ b/drivers/net/ethernet/freescale/ucc_geth.c @@ -40,10 +40,10 @@ #include <asm/uaccess.h> #include <asm/irq.h> #include <asm/io.h> -#include <asm/immap_qe.h> -#include <asm/qe.h> -#include <asm/ucc.h> -#include <asm/ucc_fast.h> +#include <soc/fsl/qe/immap_qe.h> +#include <soc/fsl/qe/qe.h> +#include <soc/fsl/qe/ucc.h> +#include <soc/fsl/qe/ucc_fast.h> #include <asm/machdep.h> #include "ucc_geth.h" @@ -1385,7 +1385,7 @@ static int adjust_enet_interface(struct ucc_geth_private *ugeth) value &= ~0x1000; /* Turn off autonegotiation */ phy_write(tbiphy, ENET_TBI_MII_CR, value); - put_device(&tbiphy->dev); + put_device(&tbiphy->mdio.dev); } init_check_frame_length_mode(ug_info->lengthCheckRx, &ug_regs->maccfg2); @@ -1705,7 +1705,7 @@ static void uec_configure_serdes(struct net_device *dev) * several seconds for it to come back. 
*/ if (phy_read(tbiphy, ENET_TBI_MII_SR) & TBISR_LSTATUS) { - put_device(&tbiphy->dev); + put_device(&tbiphy->mdio.dev); return; } @@ -1716,7 +1716,7 @@ static void uec_configure_serdes(struct net_device *dev) phy_write(tbiphy, ENET_TBI_MII_CR, TBICR_SETTINGS); - put_device(&tbiphy->dev); + put_device(&tbiphy->mdio.dev); } /* Configure the PHY for dev. diff --git a/drivers/net/ethernet/freescale/ucc_geth.h b/drivers/net/ethernet/freescale/ucc_geth.h index 75f337163ce3..5da19b440a6a 100644 --- a/drivers/net/ethernet/freescale/ucc_geth.h +++ b/drivers/net/ethernet/freescale/ucc_geth.h @@ -22,11 +22,11 @@ #include <linux/list.h> #include <linux/if_ether.h> -#include <asm/immap_qe.h> -#include <asm/qe.h> +#include <soc/fsl/qe/immap_qe.h> +#include <soc/fsl/qe/qe.h> -#include <asm/ucc.h> -#include <asm/ucc_fast.h> +#include <soc/fsl/qe/ucc.h> +#include <soc/fsl/qe/ucc_fast.h> #define DRV_DESC "QE UCC Gigabit Ethernet Controller" #define DRV_NAME "ucc_geth" diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c index 253f8ed0537a..0c4afe95ef54 100644 --- a/drivers/net/ethernet/hisilicon/hip04_eth.c +++ b/drivers/net/ethernet/hisilicon/hip04_eth.c @@ -500,8 +500,10 @@ static int hip04_rx_poll(struct napi_struct *napi, int budget) while (cnt && !last) { buf = priv->rx_buf[priv->rx_head]; skb = build_skb(buf, priv->rx_buf_size); - if (unlikely(!skb)) + if (unlikely(!skb)) { net_dbg_ratelimited("build_skb failed\n"); + goto refill; + } dma_unmap_single(&ndev->dev, priv->rx_phys[priv->rx_head], RX_BUF_SIZE, DMA_FROM_DEVICE); @@ -528,6 +530,7 @@ static int hip04_rx_poll(struct napi_struct *napi, int budget) rx++; } +refill: buf = netdev_alloc_frag(priv->rx_buf_size); if (!buf) goto done; diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.c b/drivers/net/ethernet/hisilicon/hns/hnae.c index b3645297477e..3bfe36f9405b 100644 --- a/drivers/net/ethernet/hisilicon/hns/hnae.c +++ b/drivers/net/ethernet/hisilicon/hns/hnae.c @@ -95,21 +95,17 @@ static struct hnae_buf_ops hnae_bops = { static int __ae_match(struct device *dev, const void *data) { struct hnae_ae_dev *hdev = cls_to_ae_dev(dev); - const char *ae_id = data; - if (!strncmp(ae_id, hdev->name, AE_NAME_SIZE)) - return 1; - - return 0; + return hdev->dev->of_node == data; } -static struct hnae_ae_dev *find_ae(const char *ae_id) +static struct hnae_ae_dev *find_ae(const struct device_node *ae_node) { struct device *dev; - WARN_ON(!ae_id); + WARN_ON(!ae_node); - dev = class_find_device(hnae_class, NULL, ae_id, __ae_match); + dev = class_find_device(hnae_class, NULL, ae_node, __ae_match); return dev ? 
cls_to_ae_dev(dev) : NULL; } @@ -316,7 +312,8 @@ EXPORT_SYMBOL(hnae_reinit_handle); * return handle ptr or ERR_PTR */ struct hnae_handle *hnae_get_handle(struct device *owner_dev, - const char *ae_id, u32 port_id, + const struct device_node *ae_node, + u32 port_id, struct hnae_buf_ops *bops) { struct hnae_ae_dev *dev; @@ -324,7 +321,7 @@ struct hnae_handle *hnae_get_handle(struct device *owner_dev, int i, j; int ret; - dev = find_ae(ae_id); + dev = find_ae(ae_node); if (!dev) return ERR_PTR(-ENODEV); diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.h b/drivers/net/ethernet/hisilicon/hns/hnae.h index 6ca94dc3dda3..1cbcb9fa3fb5 100644 --- a/drivers/net/ethernet/hisilicon/hns/hnae.h +++ b/drivers/net/ethernet/hisilicon/hns/hnae.h @@ -524,8 +524,11 @@ struct hnae_handle { #define ring_to_dev(ring) ((ring)->q->dev->dev) -struct hnae_handle *hnae_get_handle(struct device *owner_dev, const char *ae_id, - u32 port_id, struct hnae_buf_ops *bops); +struct hnae_handle *hnae_get_handle(struct device *owner_dev, + const struct device_node *ae_node, + u32 port_id, + struct hnae_buf_ops *bops); + void hnae_put_handle(struct hnae_handle *handle); int hnae_ae_register(struct hnae_ae_dev *dev, struct module *owner); void hnae_ae_unregister(struct hnae_ae_dev *dev); diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c index 522b264866b4..a0070d0e740d 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c @@ -847,6 +847,7 @@ static struct hnae_ae_ops hns_dsaf_ops = { int hns_dsaf_ae_init(struct dsaf_device *dsaf_dev) { struct hnae_ae_dev *ae_dev = &dsaf_dev->ae_dev; + static atomic_t id = ATOMIC_INIT(-1); switch (dsaf_dev->dsaf_ver) { case AE_VERSION_1: @@ -858,6 +859,9 @@ int hns_dsaf_ae_init(struct dsaf_device *dsaf_dev) default: break; } + + snprintf(ae_dev->name, AE_NAME_SIZE, "%s%d", DSAF_DEVICE_NAME, + (int)atomic_inc_return(&id)); ae_dev->ops = &hns_dsaf_ops; ae_dev->dev = dsaf_dev->dev; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c index 1c33bd06bd5c..9439f04962e1 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c @@ -35,7 +35,7 @@ int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev) int ret, i; u32 desc_num; u32 buf_size; - const char *name, *mode_str; + const char *mode_str; struct device_node *np = dsaf_dev->dev->of_node; if (of_device_is_compatible(np, "hisilicon,hns-dsaf-v1")) @@ -43,14 +43,6 @@ int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev) else dsaf_dev->dsaf_ver = AE_VERSION_2; - ret = of_property_read_string(np, "dsa_name", &name); - if (ret) { - dev_err(dsaf_dev->dev, "get dsaf name fail, ret=%d!\n", ret); - return ret; - } - strncpy(dsaf_dev->ae_dev.name, name, AE_NAME_SIZE); - dsaf_dev->ae_dev.name[AE_NAME_SIZE - 1] = '\0'; - ret = of_property_read_string(np, "mode", &mode_str); if (ret) { dev_err(dsaf_dev->dev, "get dsaf mode fail, ret=%d!\n", ret); diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h index 31c312f9826e..40205b910f80 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h @@ -18,6 +18,7 @@ struct hns_mac_cb; #define DSAF_DRV_NAME "hns_dsaf" #define DSAF_MOD_VERSION "v1.0" +#define DSAF_DEVICE_NAME "dsaf" #define HNS_DSAF_DEBUG_NW_REG_OFFSET 0x100000 diff --git 
a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c index d2263c72bd8a..12188807468c 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c @@ -369,8 +369,17 @@ int hns_rcb_common_init_hw(struct rcb_common_cb *rcb_common) dsaf_write_dev(rcb_common, RCB_COM_CFG_ENDIAN_REG, HNS_RCB_COMMON_ENDIAN); - dsaf_write_dev(rcb_common, RCB_COM_CFG_FNA_REG, 0x0); - dsaf_write_dev(rcb_common, RCB_COM_CFG_FA_REG, 0x1); + if (AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver)) { + dsaf_write_dev(rcb_common, RCB_COM_CFG_FNA_REG, 0x0); + dsaf_write_dev(rcb_common, RCB_COM_CFG_FA_REG, 0x1); + } else { + dsaf_set_dev_bit(rcb_common, RCBV2_COM_CFG_USER_REG, + RCB_COM_CFG_FNA_B, false); + dsaf_set_dev_bit(rcb_common, RCBV2_COM_CFG_USER_REG, + RCB_COM_CFG_FA_B, true); + dsaf_set_dev_bit(rcb_common, RCBV2_COM_CFG_TSO_MODE_REG, + RCB_COM_TSO_MODE_B, HNS_TSO_MODE_8BD_32K); + } return 0; } diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h index 29041b18741a..81fe9f849973 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h @@ -54,6 +54,9 @@ struct rcb_common_cb; #define HNS_DUMP_REG_NUM 500 #define HNS_STATIC_REG_NUM 12 +#define HNS_TSO_MODE_8BD_32K 1 +#define HNS_TSO_MDOE_4BD_16K 0 + enum rcb_int_flag { RCB_INT_FLAG_TX = 0x1, RCB_INT_FLAG_RX = (0x1 << 1), diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h index 5d1b746e141d..f0c4f9b09d5b 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h @@ -363,6 +363,8 @@ #define RCB_COM_CFG_FA_REG 0x3C #define RCB_COM_CFG_PKT_TC_BP_REG 0x40 #define RCB_COM_CFG_PPE_TNL_CLKEN_REG 0x44 +#define RCBV2_COM_CFG_USER_REG 0x30 +#define RCBV2_COM_CFG_TSO_MODE_REG 0x50 #define RCB_COM_INTMSK_TX_PKT_REG 0x3A0 #define RCB_COM_RINT_TX_PKT_REG 0x3A8 @@ -860,6 +862,9 @@ #define PPE_COMMON_CNT_CLR_CE_B 0 #define PPE_COMMON_CNT_CLR_SNAP_EN_B 1 +#define RCB_COM_TSO_MODE_B 0 +#define RCB_COM_CFG_FNA_B 1 +#define RCB_COM_CFG_FA_B 0 #define GMAC_DUPLEX_TYPE_B 0 diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c index 0e30846a24f8..3f77ff77abbc 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c @@ -1802,7 +1802,7 @@ static int hns_nic_try_get_ae(struct net_device *ndev) int ret; h = hnae_get_handle(&priv->netdev->dev, - priv->ae_name, priv->port_id, NULL); + priv->ae_node, priv->port_id, NULL); if (IS_ERR_OR_NULL(h)) { ret = PTR_ERR(h); dev_dbg(priv->dev, "has not handle, register notifier!\n"); @@ -1880,13 +1880,16 @@ static int hns_nic_dev_probe(struct platform_device *pdev) else priv->enet_ver = AE_VERSION_2; - ret = of_property_read_string(node, "ae-name", &priv->ae_name); - if (ret) - goto out_read_string_fail; + priv->ae_node = (void *)of_parse_phandle(node, "ae-handle", 0); + if (IS_ERR_OR_NULL(priv->ae_node)) { + ret = PTR_ERR(priv->ae_node); + dev_err(dev, "not find ae-handle\n"); + goto out_read_prop_fail; + } ret = of_property_read_u32(node, "port-id", &priv->port_id); if (ret) - goto out_read_string_fail; + goto out_read_prop_fail; hns_init_mac_addr(ndev); @@ -1945,7 +1948,7 @@ static int hns_nic_dev_probe(struct platform_device *pdev) out_notify_fail: (void)cancel_work_sync(&priv->service_task); 
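(Annotation; the probe error-label rename continues below.) The hns changes in this chunk replace the copied "ae-name"/"dsa_name" strings with a device-tree phandle: __ae_match now compares of_node pointers and hns_nic_dev_probe resolves an "ae-handle" property instead of reading a name. A kernel-style sketch of that lookup shape, simplified (the real match goes through the hnae class-device wrapper, and the caller should eventually of_node_put() the node) and not runnable outside the kernel:

static int match_by_of_node(struct device *dev, const void *data)
{
	/* Pointer equality on the DT node replaces the old strncmp()
	 * on a name string copied into the ae device.
	 */
	return dev->of_node == data;
}

static struct device *find_by_phandle(struct class *cls,
				      struct device_node *consumer)
{
	/* "ae-handle" is the property name the hns binding above uses;
	 * caller is responsible for of_node_put() on the result's node.
	 */
	struct device_node *target = of_parse_phandle(consumer,
						      "ae-handle", 0);

	if (!target)
		return NULL;

	return class_find_device(cls, NULL, target, match_by_of_node);
}

This is also why hns_dsaf_ae_init above starts generating its own "dsaf%d" names from an atomic counter: the name no longer has to match anything in the device tree.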
-out_read_string_fail: +out_read_prop_fail: free_netdev(ndev); return ret; } diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.h b/drivers/net/ethernet/hisilicon/hns/hns_enet.h index 4b75270f014e..c68ab3d34fc2 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.h @@ -51,7 +51,7 @@ struct hns_nic_ops { }; struct hns_nic_priv { - const char *ae_name; + const struct device_node *ae_node; u32 enet_ver; u32 port_id; int phy_mode; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c index 3b234176dd36..3df22840fcd1 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c @@ -71,24 +71,22 @@ static void hns_get_mdix_mode(struct net_device *net_dev, struct hns_nic_priv *priv = netdev_priv(net_dev); struct phy_device *phy_dev = priv->phy; - if (!phy_dev || !phy_dev->bus) { + if (!phy_dev || !phy_dev->mdio.bus) { cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_INVALID; cmd->eth_tp_mdix = ETH_TP_MDI_INVALID; return; } - (void)mdiobus_write(phy_dev->bus, phy_dev->addr, HNS_PHY_PAGE_REG, - HNS_PHY_PAGE_MDIX); + phy_write(phy_dev, HNS_PHY_PAGE_REG, HNS_PHY_PAGE_MDIX); - retval = mdiobus_read(phy_dev->bus, phy_dev->addr, HNS_PHY_CSC_REG); + retval = phy_read(phy_dev, HNS_PHY_CSC_REG); mdix_ctrl = hnae_get_field(retval, PHY_MDIX_CTRL_M, PHY_MDIX_CTRL_S); - retval = mdiobus_read(phy_dev->bus, phy_dev->addr, HNS_PHY_CSS_REG); + retval = phy_read(phy_dev, HNS_PHY_CSS_REG); mdix = hnae_get_bit(retval, PHY_MDIX_STATUS_B); is_resolved = hnae_get_bit(retval, PHY_SPEED_DUP_RESOLVE_B); - (void)mdiobus_write(phy_dev->bus, phy_dev->addr, HNS_PHY_PAGE_REG, - HNS_PHY_PAGE_COPPER); + phy_write(phy_dev, HNS_PHY_PAGE_REG, HNS_PHY_PAGE_COPPER); switch (mdix_ctrl) { case 0x0: @@ -253,53 +251,36 @@ static int hns_nic_config_phy_loopback(struct phy_device *phy_dev, u8 en) if (en) { /* speed : 1000M */ - (void)mdiobus_write(phy_dev->bus, phy_dev->addr, - HNS_PHY_PAGE_REG, 2); - (void)mdiobus_write(phy_dev->bus, phy_dev->addr, - 21, 0x1046); + phy_write(phy_dev, HNS_PHY_PAGE_REG, 2); + phy_write(phy_dev, 21, 0x1046); /* Force Master */ - (void)mdiobus_write(phy_dev->bus, phy_dev->addr, - 9, 0x1F00); + phy_write(phy_dev, 9, 0x1F00); /* Soft-reset */ - (void)mdiobus_write(phy_dev->bus, phy_dev->addr, - 0, 0x9140); + phy_write(phy_dev, 0, 0x9140); /* If autoneg disabled,two soft-reset operations */ - (void)mdiobus_write(phy_dev->bus, phy_dev->addr, - 0, 0x9140); - (void)mdiobus_write(phy_dev->bus, phy_dev->addr, - 22, 0xFA); + phy_write(phy_dev, 0, 0x9140); + phy_write(phy_dev, 22, 0xFA); /* Default is 0x0400 */ - (void)mdiobus_write(phy_dev->bus, phy_dev->addr, - 1, 0x418); + phy_write(phy_dev, 1, 0x418); /* Force 1000M Link, Default is 0x0200 */ - (void)mdiobus_write(phy_dev->bus, phy_dev->addr, - 7, 0x20C); - (void)mdiobus_write(phy_dev->bus, phy_dev->addr, - 22, 0); + phy_write(phy_dev, 7, 0x20C); + phy_write(phy_dev, 22, 0); /* Enable MAC loop-back */ - val = (u16)mdiobus_read(phy_dev->bus, phy_dev->addr, - COPPER_CONTROL_REG); + val = phy_read(phy_dev, COPPER_CONTROL_REG); val |= PHY_LOOP_BACK; - (void)mdiobus_write(phy_dev->bus, phy_dev->addr, - COPPER_CONTROL_REG, val); + phy_write(phy_dev, COPPER_CONTROL_REG, val); } else { - (void)mdiobus_write(phy_dev->bus, phy_dev->addr, - 22, 0xFA); - (void)mdiobus_write(phy_dev->bus, phy_dev->addr, - 1, 0x400); - (void)mdiobus_write(phy_dev->bus, phy_dev->addr, - 7, 0x200); - (void)mdiobus_write(phy_dev->bus, 
phy_dev->addr, - 22, 0); - - val = (u16)mdiobus_read(phy_dev->bus, phy_dev->addr, - COPPER_CONTROL_REG); + phy_write(phy_dev, 22, 0xFA); + phy_write(phy_dev, 1, 0x400); + phy_write(phy_dev, 7, 0x200); + phy_write(phy_dev, 22, 0); + + val = phy_read(phy_dev, COPPER_CONTROL_REG); val &= ~PHY_LOOP_BACK; - (void)mdiobus_write(phy_dev->bus, phy_dev->addr, - COPPER_CONTROL_REG, val); + phy_write(phy_dev, COPPER_CONTROL_REG, val); } return 0; } @@ -1018,16 +999,9 @@ int hns_phy_led_set(struct net_device *netdev, int value) struct hns_nic_priv *priv = netdev_priv(netdev); struct phy_device *phy_dev = priv->phy; - if (!phy_dev->bus) { - netdev_err(netdev, "phy_dev->bus is null!\n"); - return -EINVAL; - } - retval = mdiobus_write(phy_dev->bus, phy_dev->addr, - HNS_PHY_PAGE_REG, HNS_PHY_PAGE_LED); - retval = mdiobus_write(phy_dev->bus, phy_dev->addr, HNS_LED_FC_REG, - value); - retval = mdiobus_write(phy_dev->bus, phy_dev->addr, - HNS_PHY_PAGE_REG, HNS_PHY_PAGE_COPPER); + retval = phy_write(phy_dev, HNS_PHY_PAGE_REG, HNS_PHY_PAGE_LED); + retval = phy_write(phy_dev, HNS_LED_FC_REG, value); + retval = phy_write(phy_dev, HNS_PHY_PAGE_REG, HNS_PHY_PAGE_COPPER); if (retval) { netdev_err(netdev, "mdiobus_write fail !\n"); return retval; @@ -1052,19 +1026,15 @@ int hns_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state) if (phy_dev) switch (state) { case ETHTOOL_ID_ACTIVE: - ret = mdiobus_write(phy_dev->bus, phy_dev->addr, - HNS_PHY_PAGE_REG, - HNS_PHY_PAGE_LED); + ret = phy_write(phy_dev, HNS_PHY_PAGE_REG, + HNS_PHY_PAGE_LED); if (ret) return ret; - priv->phy_led_val = (u16)mdiobus_read(phy_dev->bus, - phy_dev->addr, - HNS_LED_FC_REG); + priv->phy_led_val = phy_read(phy_dev, HNS_LED_FC_REG); - ret = mdiobus_write(phy_dev->bus, phy_dev->addr, - HNS_PHY_PAGE_REG, - HNS_PHY_PAGE_COPPER); + ret = phy_write(phy_dev, HNS_PHY_PAGE_REG, + HNS_PHY_PAGE_COPPER); if (ret) return ret; return 2; @@ -1079,20 +1049,18 @@ int hns_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state) return ret; break; case ETHTOOL_ID_INACTIVE: - ret = mdiobus_write(phy_dev->bus, phy_dev->addr, - HNS_PHY_PAGE_REG, - HNS_PHY_PAGE_LED); + ret = phy_write(phy_dev, HNS_PHY_PAGE_REG, + HNS_PHY_PAGE_LED); if (ret) return ret; - ret = mdiobus_write(phy_dev->bus, phy_dev->addr, - HNS_LED_FC_REG, priv->phy_led_val); + ret = phy_write(phy_dev, HNS_LED_FC_REG, + priv->phy_led_val); if (ret) return ret; - ret = mdiobus_write(phy_dev->bus, phy_dev->addr, - HNS_PHY_PAGE_REG, - HNS_PHY_PAGE_COPPER); + ret = phy_write(phy_dev, HNS_PHY_PAGE_REG, + HNS_PHY_PAGE_COPPER); if (ret) return ret; break; diff --git a/drivers/net/ethernet/hisilicon/hns_mdio.c b/drivers/net/ethernet/hisilicon/hns_mdio.c index 37491c85bc42..765ddb3dcd1a 100644 --- a/drivers/net/ethernet/hisilicon/hns_mdio.c +++ b/drivers/net/ethernet/hisilicon/hns_mdio.c @@ -458,16 +458,11 @@ static int hns_mdio_probe(struct platform_device *pdev) } mdio_dev->subctrl_vbase = - syscon_node_to_regmap(of_parse_phandle(np, "subctrl_vbase", 0)); + syscon_node_to_regmap(of_parse_phandle(np, "subctrl-vbase", 0)); if (IS_ERR(mdio_dev->subctrl_vbase)) { dev_warn(&pdev->dev, "no syscon hisilicon,peri-c-subctrl\n"); mdio_dev->subctrl_vbase = NULL; } - new_bus->irq = devm_kcalloc(&pdev->dev, PHY_MAX_ADDR, - sizeof(int), GFP_KERNEL); - if (!new_bus->irq) - return -ENOMEM; - new_bus->parent = &pdev->dev; platform_set_drvdata(pdev, new_bus); diff --git a/drivers/net/ethernet/hp/hp100.c b/drivers/net/ethernet/hp/hp100.c index 1d5c3e16d8f4..3daf2d4a7ca0 100644 --- 
a/drivers/net/ethernet/hp/hp100.c +++ b/drivers/net/ethernet/hp/hp100.c @@ -194,7 +194,6 @@ static const char *hp100_isa_tbl[] = { }; #endif -#ifdef CONFIG_EISA static struct eisa_device_id hp100_eisa_tbl[] = { { "HWPF180" }, /* HP J2577 rev A */ { "HWP1920" }, /* HP 27248B */ @@ -205,9 +204,7 @@ static struct eisa_device_id hp100_eisa_tbl[] = { { "" } /* Mandatory final entry ! */ }; MODULE_DEVICE_TABLE(eisa, hp100_eisa_tbl); -#endif -#ifdef CONFIG_PCI static const struct pci_device_id hp100_pci_tbl[] = { {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_J2585A, PCI_ANY_ID, PCI_ANY_ID,}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_J2585B, PCI_ANY_ID, PCI_ANY_ID,}, @@ -219,7 +216,6 @@ static const struct pci_device_id hp100_pci_tbl[] = { {} /* Terminating entry */ }; MODULE_DEVICE_TABLE(pci, hp100_pci_tbl); -#endif static int hp100_rx_ratio = HP100_DEFAULT_RX_RATIO; static int hp100_priority_tx = HP100_DEFAULT_PRIORITY_TX; @@ -2842,7 +2838,6 @@ static void cleanup_dev(struct net_device *d) free_netdev(d); } -#ifdef CONFIG_EISA static int hp100_eisa_probe(struct device *gendev) { struct net_device *dev = alloc_etherdev(sizeof(struct hp100_private)); @@ -2884,9 +2879,7 @@ static struct eisa_driver hp100_eisa_driver = { .remove = hp100_eisa_remove, } }; -#endif -#ifdef CONFIG_PCI static int hp100_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { @@ -2955,7 +2948,6 @@ static struct pci_driver hp100_pci_driver = { .probe = hp100_pci_probe, .remove = hp100_pci_remove, }; -#endif /* * module section @@ -3032,23 +3024,17 @@ static int __init hp100_module_init(void) err = hp100_isa_init(); if (err && err != -ENODEV) goto out; -#ifdef CONFIG_EISA err = eisa_driver_register(&hp100_eisa_driver); if (err && err != -ENODEV) goto out2; -#endif -#ifdef CONFIG_PCI err = pci_register_driver(&hp100_pci_driver); if (err && err != -ENODEV) goto out3; -#endif out: return err; out3: -#ifdef CONFIG_EISA eisa_driver_unregister (&hp100_eisa_driver); out2: -#endif hp100_isa_cleanup(); goto out; } @@ -3057,12 +3043,8 @@ static int __init hp100_module_init(void) static void __exit hp100_module_exit(void) { hp100_isa_cleanup(); -#ifdef CONFIG_EISA eisa_driver_unregister (&hp100_eisa_driver); -#endif -#ifdef CONFIG_PCI pci_unregister_driver (&hp100_pci_driver); -#endif } module_init(hp100_module_init) diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c index b243c3cbe68f..38f558e0bb62 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c @@ -1937,8 +1937,10 @@ static void fm10k_init_reta(struct fm10k_intfc *interface) u16 i, rss_i = interface->ring_feature[RING_F_RSS].indices; u32 reta, base; - /* If the netdev is initialized we have to maintain table if possible */ - if (interface->netdev->reg_state != NETREG_UNINITIALIZED) { + /* If the Rx flow indirection table has been configured manually, we + * need to maintain it when possible. + */ + if (netif_is_rxfh_configured(interface->netdev)) { for (i = FM10K_RETA_SIZE; i--;) { reta = interface->reta[i]; if ((((reta << 24) >> 24) < rss_i) && @@ -1946,6 +1948,10 @@ static void fm10k_init_reta(struct fm10k_intfc *interface) (((reta << 8) >> 24) < rss_i) && (((reta) >> 24) < rss_i)) continue; + + /* this should never happen */ + dev_err(&interface->pdev->dev, + "RSS indirection table assigned flows out of queue bounds. 
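
[Note on fm10k_init_reta above] The decision to preserve the RETA is now keyed on netif_is_rxfh_configured() rather than on netdev registration state, so a table the user never touched can be freely repopulated. The helper is essentially a priv_flags test; a hedged sketch of its shape (see netdevice.h for the real inline):

static inline bool sketch_is_rxfh_configured(const struct net_device *dev)
{
        /* set by the ethtool core when userspace installs an explicit
         * RSS indirection table, cleared when it is reset to defaults
         */
        return !!(dev->priv_flags & IFF_RXFH_CONFIGURED);
}
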
Reconfiguring.\n"); goto repopulate_reta; } diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c index 662569d5b7c0..dc1a82148ff0 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c @@ -1204,6 +1204,15 @@ err_queueing_scheme: return err; } +static int __fm10k_setup_tc(struct net_device *dev, u32 handle, __be16 proto, + struct tc_to_netdev *tc) +{ + if (handle != TC_H_ROOT || tc->type != TC_SETUP_MQPRIO) + return -EINVAL; + + return fm10k_setup_tc(dev, tc->tc); +} + static int fm10k_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) { switch (cmd) { @@ -1386,7 +1395,7 @@ static const struct net_device_ops fm10k_netdev_ops = { .ndo_vlan_rx_kill_vid = fm10k_vlan_rx_kill_vid, .ndo_set_rx_mode = fm10k_set_rx_mode, .ndo_get_stats64 = fm10k_get_stats64, - .ndo_setup_tc = fm10k_setup_tc, + .ndo_setup_tc = __fm10k_setup_tc, .ndo_set_vf_mac = fm10k_ndo_set_vf_mac, .ndo_set_vf_vlan = fm10k_ndo_set_vf_vlan, .ndo_set_vf_rate = fm10k_ndo_set_vf_bw, diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index c202f9b9386a..e0758ddd2e22 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2015 Intel Corporation. + * Copyright(c) 2013 - 2016 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -64,9 +64,6 @@ #include "i40e_dcb.h" /* Useful i40e defaults */ -#define I40E_BASE_PF_SEID 16 -#define I40E_BASE_VSI_SEID 512 -#define I40E_BASE_VEB_SEID 288 #define I40E_MAX_VEB 16 #define I40E_MAX_NUM_DESCRIPTORS 4096 @@ -104,6 +101,7 @@ #define I40E_PRIV_FLAGS_FD_ATR BIT(2) #define I40E_PRIV_FLAGS_VEB_STATS BIT(3) #define I40E_PRIV_FLAGS_PS BIT(4) +#define I40E_PRIV_FLAGS_HW_ATR_EVICT BIT(5) #define I40E_NVM_VERSION_LO_SHIFT 0 #define I40E_NVM_VERSION_LO_MASK (0xff << I40E_NVM_VERSION_LO_SHIFT) @@ -137,6 +135,19 @@ /* default to trying for four seconds */ #define I40E_TRY_LINK_TIMEOUT (4 * HZ) +/** + * i40e_is_mac_710 - Return true if MAC is X710/XL710 + * @hw: ptr to the hardware info + **/ +static inline bool i40e_is_mac_710(struct i40e_hw *hw) +{ + if ((hw->mac.type == I40E_MAC_X710) || + (hw->mac.type == I40E_MAC_XL710)) + return true; + + return false; +} + /* driver state flags */ enum i40e_state_t { __I40E_TESTING, @@ -339,6 +350,12 @@ struct i40e_pf { #define I40E_FLAG_VEB_MODE_ENABLED BIT_ULL(40) #define I40E_FLAG_GENEVE_OFFLOAD_CAPABLE BIT_ULL(41) #define I40E_FLAG_NO_PCI_LINK_CHECK BIT_ULL(42) +#define I40E_FLAG_100M_SGMII_CAPABLE BIT_ULL(43) +#define I40E_FLAG_RESTART_AUTONEG BIT_ULL(44) +#define I40E_FLAG_NO_DCB_SUPPORT BIT_ULL(45) +#define I40E_FLAG_USE_SET_LLDP_MIB BIT_ULL(46) +#define I40E_FLAG_STOP_FW_LLDP BIT_ULL(47) +#define I40E_FLAG_PF_MAC BIT_ULL(50) /* tracks features that get auto disabled by errors */ u64 auto_disable_flags; @@ -390,6 +407,7 @@ struct i40e_pf { struct i40e_vf *vf; int num_alloc_vfs; /* actual number of VFs allocated */ u32 vf_aq_requests; + u32 arq_overflows; /* Not fatal, possibly indicative of problems */ /* DCBx/DCBNL capability for PF that indicates * whether DCBx is managed by firmware or host @@ -491,6 +509,7 @@ struct i40e_vsi { u32 tx_busy; u64 tx_linearize; u64 
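
[Note on the new i40e_is_mac_710() inline above] It gives later patches one place to gate X710/XL710-specific behavior instead of repeating device-ID or mac.type comparisons. A hedged usage sketch, pairing it with one of the new pf->flags bits from this hunk (the function name and the specific pairing are illustrative, not from this patch):

static void sketch_apply_mac_quirks(struct i40e_pf *pf)
{
        /* e.g. only the 710 parts would need an autoneg restart workaround */
        if (i40e_is_mac_710(&pf->hw))
                pf->flags |= I40E_FLAG_RESTART_AUTONEG;
}
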
tx_force_wb; + u64 tx_lost_interrupt; u32 rx_buf_failed; u32 rx_page_failed; @@ -746,6 +765,9 @@ static inline void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector) struct i40e_hw *hw = &pf->hw; u32 val; + /* definitely clear the PBA here, as this function is meant to + * clean out all previous interrupts AND enable the interrupt + */ val = I40E_PFINT_DYN_CTLN_INTENA_MASK | I40E_PFINT_DYN_CTLN_CLEARPBA_MASK | (I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT); @@ -753,9 +775,8 @@ static inline void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector) /* skip the flush */ } -void i40e_irq_dynamic_disable(struct i40e_vsi *vsi, int vector); void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf); -void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf); +void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf, bool clearpba); #ifdef I40E_FCOE struct rtnl_link_stats64 *i40e_get_netdev_stats_struct( struct net_device *netdev, @@ -785,7 +806,8 @@ struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr, bool is_vf, bool is_netdev); #ifdef I40E_FCOE int i40e_close(struct net_device *netdev); -int i40e_setup_tc(struct net_device *netdev, u8 tc); +int __i40e_setup_tc(struct net_device *netdev, u32 handle, __be16 proto, + struct tc_to_netdev *tc); void i40e_netpoll(struct net_device *netdev); int i40e_fcoe_enable(struct net_device *netdev); int i40e_fcoe_disable(struct net_device *netdev); diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c index 1fd5ea82a9bc..df8e2fd6a649 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2014 Intel Corporation. + * Copyright(c) 2013 - 2016 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -953,6 +953,9 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw, u16 flags; u16 ntu; + /* pre-clean the event info */ + memset(&e->desc, 0, sizeof(e->desc)); + /* take the lock before we start messing with the ring */ mutex_lock(&hw->aq.arq_mutex); @@ -1020,14 +1023,6 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw, hw->aq.arq.next_to_clean = ntc; hw->aq.arq.next_to_use = ntu; -clean_arq_element_out: - /* Set pending if needed, unlock and return */ - if (pending != NULL) - *pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc); - -clean_arq_element_err: - mutex_unlock(&hw->aq.arq_mutex); - if (i40e_is_nvm_update_op(&e->desc)) { if (hw->aq.nvm_release_on_done) { i40e_release_nvm(hw); @@ -1048,6 +1043,13 @@ clean_arq_element_err: } } +clean_arq_element_out: + /* Set pending if needed, unlock and return */ + if (pending) + *pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc); +clean_arq_element_err: + mutex_unlock(&hw->aq.arq_mutex); + return ret_code; } diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h index b22012a446a6..bb7ecbbdf948 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2014 Intel Corporation. 
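
[Note on the __i40e_setup_tc() prototype above] It mirrors the fm10k wrapper shown earlier: the old u8-taking i40e_setup_tc() is kept, and a thin adapter validates the new ndo_setup_tc arguments before delegating. A sketch of the expected body, assuming it follows the fm10k wrapper verbatim:

static int __i40e_setup_tc(struct net_device *netdev, u32 handle, __be16 proto,
                           struct tc_to_netdev *tc)
{
        /* only mqprio on the root qdisc is offloadable here */
        if (handle != TC_H_ROOT || tc->type != TC_SETUP_MQPRIO)
                return -EINVAL;

        return i40e_setup_tc(netdev, tc->tc);
}
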
+ * Copyright(c) 2013 - 2016 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -34,7 +34,7 @@ */ #define I40E_FW_API_VERSION_MAJOR 0x0001 -#define I40E_FW_API_VERSION_MINOR 0x0004 +#define I40E_FW_API_VERSION_MINOR 0x0005 struct i40e_aq_desc { __le16 flags; @@ -145,6 +145,7 @@ enum i40e_admin_queue_opc { i40e_aqc_opc_remove_statistics = 0x0202, i40e_aqc_opc_set_port_parameters = 0x0203, i40e_aqc_opc_get_switch_resource_alloc = 0x0204, + i40e_aqc_opc_set_switch_config = 0x0205, i40e_aqc_opc_add_vsi = 0x0210, i40e_aqc_opc_update_vsi_parameters = 0x0211, @@ -220,6 +221,7 @@ enum i40e_admin_queue_opc { i40e_aqc_opc_get_phy_wol_caps = 0x0621, i40e_aqc_opc_set_phy_debug = 0x0622, i40e_aqc_opc_upload_ext_phy_fm = 0x0625, + i40e_aqc_opc_run_phy_activity = 0x0626, /* NVM commands */ i40e_aqc_opc_nvm_read = 0x0701, @@ -228,6 +230,7 @@ enum i40e_admin_queue_opc { i40e_aqc_opc_nvm_config_read = 0x0704, i40e_aqc_opc_nvm_config_write = 0x0705, i40e_aqc_opc_oem_post_update = 0x0720, + i40e_aqc_opc_thermal_sensor = 0x0721, /* virtualization commands */ i40e_aqc_opc_send_msg_to_pf = 0x0801, @@ -402,6 +405,7 @@ struct i40e_aqc_list_capabilities_element_resp { #define I40E_AQ_CAP_ID_OS2BMC_CAP 0x0004 #define I40E_AQ_CAP_ID_FUNCTIONS_VALID 0x0005 #define I40E_AQ_CAP_ID_ALTERNATE_RAM 0x0006 +#define I40E_AQ_CAP_ID_WOL_AND_PROXY 0x0008 #define I40E_AQ_CAP_ID_SRIOV 0x0012 #define I40E_AQ_CAP_ID_VF 0x0013 #define I40E_AQ_CAP_ID_VMDQ 0x0014 @@ -422,6 +426,7 @@ struct i40e_aqc_list_capabilities_element_resp { #define I40E_AQ_CAP_ID_LED 0x0061 #define I40E_AQ_CAP_ID_SDP 0x0062 #define I40E_AQ_CAP_ID_MDIO 0x0063 +#define I40E_AQ_CAP_ID_WSR_PROT 0x0064 #define I40E_AQ_CAP_ID_FLEX10 0x00F1 #define I40E_AQ_CAP_ID_CEM 0x00F2 @@ -680,6 +685,17 @@ struct i40e_aqc_switch_resource_alloc_element_resp { I40E_CHECK_STRUCT_LEN(0x10, i40e_aqc_switch_resource_alloc_element_resp); +/* Set Switch Configuration (direct 0x0205) */ +struct i40e_aqc_set_switch_config { + __le16 flags; +#define I40E_AQ_SET_SWITCH_CFG_PROMISC 0x0001 +#define I40E_AQ_SET_SWITCH_CFG_L2_FILTER 0x0002 + __le16 valid_flags; + u8 reserved[12]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_set_switch_config); + /* Add VSI (indirect 0x0210) * this indirect command uses struct i40e_aqc_vsi_properties_data * as the indirect buffer (128 bytes) @@ -906,7 +922,8 @@ struct i40e_aqc_add_veb { I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT) #define I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT 0x2 #define I40E_AQC_ADD_VEB_PORT_TYPE_DATA 0x4 -#define I40E_AQC_ADD_VEB_ENABLE_L2_FILTER 0x8 +#define I40E_AQC_ADD_VEB_ENABLE_L2_FILTER 0x8 /* deprecated */ +#define I40E_AQC_ADD_VEB_ENABLE_DISABLE_STATS 0x10 u8 enable_tcs; u8 reserved[9]; }; @@ -973,6 +990,7 @@ struct i40e_aqc_add_macvlan_element_data { #define I40E_AQC_MACVLAN_ADD_HASH_MATCH 0x0002 #define I40E_AQC_MACVLAN_ADD_IGNORE_VLAN 0x0004 #define I40E_AQC_MACVLAN_ADD_TO_QUEUE 0x0008 +#define I40E_AQC_MACVLAN_ADD_USE_SHARED_MAC 0x0010 __le16 queue_number; #define I40E_AQC_MACVLAN_CMD_QUEUE_SHIFT 0 #define I40E_AQC_MACVLAN_CMD_QUEUE_MASK (0x7FF << \ @@ -1069,6 +1087,7 @@ struct i40e_aqc_set_vsi_promiscuous_modes { #define I40E_AQC_SET_VSI_PROMISC_BROADCAST 0x04 #define I40E_AQC_SET_VSI_DEFAULT 0x08 #define I40E_AQC_SET_VSI_PROMISC_VLAN 0x10 +#define I40E_AQC_SET_VSI_PROMISC_TX 0x8000 __le16 seid; #define I40E_AQC_VSI_PROM_CMD_SEID_MASK 0x3FF __le16 vlan_tag; @@ -1257,10 +1276,16 @@ struct i40e_aqc_add_remove_cloud_filters_element_data { 
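
[Note on the new set_switch_config command (0x0205) above] It follows the usual direct-AQ pattern: fill a descriptor, overlay the command struct on the raw params, send. A hedged sketch of a sender, modeled on the other i40e_aq_* helpers in i40e_common.c (the sketch_ name and NULL cmd_details are illustrative):

i40e_status sketch_aq_set_switch_config(struct i40e_hw *hw,
                                        u16 flags, u16 valid_flags)
{
        struct i40e_aq_desc desc;
        struct i40e_aqc_set_switch_config *scfg =
                (struct i40e_aqc_set_switch_config *)&desc.params.raw;

        i40e_fill_default_direct_cmd_desc(&desc,
                                          i40e_aqc_opc_set_switch_config);
        /* firmware applies only the bits that are set in valid_flags */
        scfg->flags = cpu_to_le16(flags);
        scfg->valid_flags = cpu_to_le16(valid_flags);

        return i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
}
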
#define I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT 9 #define I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK 0x1E00 -#define I40E_AQC_ADD_CLOUD_TNL_TYPE_XVLAN 0 +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN 0 #define I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC 1 -#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NGE 2 +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE 2 #define I40E_AQC_ADD_CLOUD_TNL_TYPE_IP 3 +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_RESERVED 4 +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN_GPE 5 + +#define I40E_AQC_ADD_CLOUD_FLAGS_SHARED_OUTER_MAC 0x2000 +#define I40E_AQC_ADD_CLOUD_FLAGS_SHARED_INNER_MAC 0x4000 +#define I40E_AQC_ADD_CLOUD_FLAGS_SHARED_OUTER_IP 0x8000 __le32 tenant_id; u8 reserved[4]; @@ -1755,7 +1780,12 @@ struct i40e_aqc_get_link_status { u8 config; #define I40E_AQ_CONFIG_CRC_ENA 0x04 #define I40E_AQ_CONFIG_PACING_MASK 0x78 - u8 reserved[5]; + u8 external_power_ability; +#define I40E_AQ_LINK_POWER_CLASS_1 0x00 +#define I40E_AQ_LINK_POWER_CLASS_2 0x01 +#define I40E_AQ_LINK_POWER_CLASS_3 0x02 +#define I40E_AQ_LINK_POWER_CLASS_4 0x03 + u8 reserved[4]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_get_link_status); @@ -1823,6 +1853,18 @@ enum i40e_aq_phy_reg_type { I40E_AQC_PHY_REG_EXERNAL_MODULE = 0x3 }; +/* Run PHY Activity (0x0626) */ +struct i40e_aqc_run_phy_activity { + __le16 activity_id; + u8 flags; + u8 reserved1; + __le32 control; + __le32 data; + u8 reserved2[4]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_run_phy_activity); + /* NVM Read command (indirect 0x0701) * NVM Erase commands (direct 0x0702) * NVM Update commands (indirect 0x0703) @@ -1912,6 +1954,22 @@ struct i40e_aqc_nvm_oem_post_update_buffer { I40E_CHECK_STRUCT_LEN(0x28, i40e_aqc_nvm_oem_post_update_buffer); +/* Thermal Sensor (indirect 0x0721) + * read or set thermal sensor configs and values + * takes a sensor and command specific data buffer, not detailed here + */ +struct i40e_aqc_thermal_sensor { + u8 sensor_action; +#define I40E_AQ_THERMAL_SENSOR_READ_CONFIG 0 +#define I40E_AQ_THERMAL_SENSOR_SET_CONFIG 1 +#define I40E_AQ_THERMAL_SENSOR_READ_TEMP 2 + u8 reserved[7]; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_thermal_sensor); + /* Send to PF command (indirect 0x0801) id is only used by PF * Send to VF command (indirect 0x0802) id is only used by PF * Send to Peer PF command (indirect 0x0803) @@ -2191,6 +2249,7 @@ struct i40e_aqc_add_udp_tunnel { #define I40E_AQC_TUNNEL_TYPE_VXLAN 0x00 #define I40E_AQC_TUNNEL_TYPE_NGE 0x01 #define I40E_AQC_TUNNEL_TYPE_TEREDO 0x10 +#define I40E_AQC_TUNNEL_TYPE_VXLAN_GPE 0x11 u8 reserved1[10]; }; diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index 2d74c6e4d7b6..f9239330f49d 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2015 Intel Corporation. + * Copyright(c) 2013 - 2016 Intel Corporation. 
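
[Note on the tunnel-type renames above] With the cloud-filter types renamed to what they actually are (VXLAN, GENEVE) and VXLAN-GPE added to both tables, registering a tunnel port stays a one-call affair. A hedged usage sketch, assuming the existing i40e_aq_add_udp_tunnel() helper keeps its current signature; the port number is just the IANA GENEVE default, used here purely as an example:

u8 filter_index;
i40e_status ret;

/* ask firmware to parse UDP port 6081 as GENEVE/NGE */
ret = i40e_aq_add_udp_tunnel(hw, 6081, I40E_AQC_TUNNEL_TYPE_NGE,
                             &filter_index, NULL);
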
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -44,7 +44,6 @@ static i40e_status i40e_set_mac_type(struct i40e_hw *hw) switch (hw->device_id) { case I40E_DEV_ID_SFP_XL710: case I40E_DEV_ID_QEMU: - case I40E_DEV_ID_KX_A: case I40E_DEV_ID_KX_B: case I40E_DEV_ID_KX_C: case I40E_DEV_ID_QSFP_A: @@ -56,19 +55,13 @@ static i40e_status i40e_set_mac_type(struct i40e_hw *hw) case I40E_DEV_ID_20G_KR2_A: hw->mac.type = I40E_MAC_XL710; break; + case I40E_DEV_ID_KX_X722: + case I40E_DEV_ID_QSFP_X722: case I40E_DEV_ID_SFP_X722: case I40E_DEV_ID_1G_BASE_T_X722: case I40E_DEV_ID_10G_BASE_T_X722: hw->mac.type = I40E_MAC_X722; break; - case I40E_DEV_ID_X722_VF: - case I40E_DEV_ID_X722_VF_HV: - hw->mac.type = I40E_MAC_X722_VF; - break; - case I40E_DEV_ID_VF: - case I40E_DEV_ID_VF_HV: - hw->mac.type = I40E_MAC_VF; - break; default: hw->mac.type = I40E_MAC_GENERIC; break; @@ -1959,12 +1952,19 @@ i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw, i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_vsi_promiscuous_modes); - if (set) + if (set) { flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST; + if (((hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver >= 5)) || + (hw->aq.api_maj_ver > 1)) + flags |= I40E_AQC_SET_VSI_PROMISC_TX; + } cmd->promiscuous_flags = cpu_to_le16(flags); cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST); + if (((hw->aq.api_maj_ver >= 1) && (hw->aq.api_min_ver >= 5)) || + (hw->aq.api_maj_ver > 1)) + cmd->valid_flags |= cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_TX); cmd->seid = cpu_to_le16(seid); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); @@ -2040,6 +2040,37 @@ i40e_status i40e_aq_set_vsi_broadcast(struct i40e_hw *hw, } /** + * i40e_aq_set_vsi_vlan_promisc - control the VLAN promiscuous setting + * @hw: pointer to the hw struct + * @seid: vsi number + * @enable: set MAC L2 layer unicast promiscuous enable/disable for a given VLAN + * @cmd_details: pointer to command details structure or NULL + **/ +i40e_status i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw, + u16 seid, bool enable, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_set_vsi_promiscuous_modes *cmd = + (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; + i40e_status status; + u16 flags = 0; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_set_vsi_promiscuous_modes); + if (enable) + flags |= I40E_AQC_SET_VSI_PROMISC_VLAN; + + cmd->promiscuous_flags = cpu_to_le16(flags); + cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_VLAN); + cmd->seid = cpu_to_le16(seid); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** * i40e_get_vsi_params - get VSI configuration info * @hw: pointer to the hw struct * @vsi_ctx: pointer to a vsi context struct @@ -2284,8 +2315,8 @@ i40e_status i40e_update_link_info(struct i40e_hw *hw) * @downlink_seid: the VSI SEID * @enabled_tc: bitmap of TCs to be enabled * @default_port: true for default port VSI, false for control port - * @enable_l2_filtering: true to add L2 filter table rules to regular forwarding rules for cloud support * @veb_seid: pointer to where to put the resulting VEB SEID + * @enable_stats: true to turn on VEB stats * @cmd_details: pointer to command details structure or NULL * * This asks the FW to add a VEB between the uplink and downlink @@ -2293,8 +2324,8 @@ i40e_status i40e_update_link_info(struct i40e_hw *hw) **/ i40e_status 
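
[Note on the unicast-promiscuous hunk above] The two "AQ API is at least 1.5" checks are written two slightly different ways (api_maj_ver == 1 in one, >= 1 in the other, each with a maj > 1 fallback); they behave the same, but a small predicate would keep the test in one place. A hedged sketch, not part of this patch:

static inline bool sketch_aq_api_at_least(struct i40e_hw *hw,
                                          u16 maj, u16 min)
{
        return hw->aq.api_maj_ver > maj ||
               (hw->aq.api_maj_ver == maj && hw->aq.api_min_ver >= min);
}

/* both gates above would then read:
 *      if (sketch_aq_api_at_least(hw, 1, 5))
 *              flags |= I40E_AQC_SET_VSI_PROMISC_TX;
 */
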
i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid, u16 downlink_seid, u8 enabled_tc, - bool default_port, bool enable_l2_filtering, - u16 *veb_seid, + bool default_port, u16 *veb_seid, + bool enable_stats, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; @@ -2321,8 +2352,9 @@ i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid, else veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DATA; - if (enable_l2_filtering) - veb_flags |= I40E_AQC_ADD_VEB_ENABLE_L2_FILTER; + /* reverse logic here: set the bitflag to disable the stats */ + if (!enable_stats) + veb_flags |= I40E_AQC_ADD_VEB_ENABLE_DISABLE_STATS; cmd->veb_flags = cpu_to_le16(veb_flags); @@ -2411,6 +2443,7 @@ i40e_status i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid, (struct i40e_aqc_macvlan *)&desc.params.raw; i40e_status status; u16 buf_size; + int i; if (count == 0 || !mv_list || !hw) return I40E_ERR_PARAM; @@ -2424,12 +2457,17 @@ i40e_status i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid, cmd->seid[1] = 0; cmd->seid[2] = 0; + for (i = 0; i < count; i++) + if (is_multicast_ether_addr(mv_list[i].mac_addr)) + mv_list[i].flags |= + cpu_to_le16(I40E_AQC_MACVLAN_ADD_USE_SHARED_MAC); + desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); if (buf_size > I40E_AQ_LARGE_BUF) desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); status = i40e_asq_send_command(hw, &desc, mv_list, buf_size, - cmd_details); + cmd_details); return status; } @@ -2477,6 +2515,137 @@ i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid, } /** + * i40e_mirrorrule_op - Internal helper function to add/delete mirror rule + * @hw: pointer to the hw struct + * @opcode: AQ opcode for add or delete mirror rule + * @sw_seid: Switch SEID (to which rule refers) + * @rule_type: Rule Type (ingress/egress/VLAN) + * @id: Destination VSI SEID or Rule ID + * @count: length of the list + * @mr_list: list of mirrored VSI SEIDs or VLAN IDs + * @cmd_details: pointer to command details structure or NULL + * @rule_id: Rule ID returned from FW + * @rule_used: Number of rules used in internal switch + * @rule_free: Number of rules free in internal switch + * + * Add/Delete a mirror rule to a specific switch. 
Mirror rules are supported for + * VEBs/VEPA elements only + **/ +static i40e_status i40e_mirrorrule_op(struct i40e_hw *hw, + u16 opcode, u16 sw_seid, u16 rule_type, u16 id, + u16 count, __le16 *mr_list, + struct i40e_asq_cmd_details *cmd_details, + u16 *rule_id, u16 *rules_used, u16 *rules_free) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_add_delete_mirror_rule *cmd = + (struct i40e_aqc_add_delete_mirror_rule *)&desc.params.raw; + struct i40e_aqc_add_delete_mirror_rule_completion *resp = + (struct i40e_aqc_add_delete_mirror_rule_completion *)&desc.params.raw; + i40e_status status; + u16 buf_size; + + buf_size = count * sizeof(*mr_list); + + /* prep the rest of the request */ + i40e_fill_default_direct_cmd_desc(&desc, opcode); + cmd->seid = cpu_to_le16(sw_seid); + cmd->rule_type = cpu_to_le16(rule_type & + I40E_AQC_MIRROR_RULE_TYPE_MASK); + cmd->num_entries = cpu_to_le16(count); + /* Dest VSI for add, rule_id for delete */ + cmd->destination = cpu_to_le16(id); + if (mr_list) { + desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | + I40E_AQ_FLAG_RD)); + if (buf_size > I40E_AQ_LARGE_BUF) + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); + } + + status = i40e_asq_send_command(hw, &desc, mr_list, buf_size, + cmd_details); + if (!status || + hw->aq.asq_last_status == I40E_AQ_RC_ENOSPC) { + if (rule_id) + *rule_id = le16_to_cpu(resp->rule_id); + if (rules_used) + *rules_used = le16_to_cpu(resp->mirror_rules_used); + if (rules_free) + *rules_free = le16_to_cpu(resp->mirror_rules_free); + } + return status; +} + +/** + * i40e_aq_add_mirrorrule - add a mirror rule + * @hw: pointer to the hw struct + * @sw_seid: Switch SEID (to which rule refers) + * @rule_type: Rule Type (ingress/egress/VLAN) + * @dest_vsi: SEID of VSI to which packets will be mirrored + * @count: length of the list + * @mr_list: list of mirrored VSI SEIDs or VLAN IDs + * @cmd_details: pointer to command details structure or NULL + * @rule_id: Rule ID returned from FW + * @rule_used: Number of rules used in internal switch + * @rule_free: Number of rules free in internal switch + * + * Add mirror rule. Mirror rules are supported for VEBs or VEPA elements only + **/ +i40e_status i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid, + u16 rule_type, u16 dest_vsi, u16 count, __le16 *mr_list, + struct i40e_asq_cmd_details *cmd_details, + u16 *rule_id, u16 *rules_used, u16 *rules_free) +{ + if (!(rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS || + rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS)) { + if (count == 0 || !mr_list) + return I40E_ERR_PARAM; + } + + return i40e_mirrorrule_op(hw, i40e_aqc_opc_add_mirror_rule, sw_seid, + rule_type, dest_vsi, count, mr_list, + cmd_details, rule_id, rules_used, rules_free); +} + +/** + * i40e_aq_delete_mirrorrule - delete a mirror rule + * @hw: pointer to the hw struct + * @sw_seid: Switch SEID (to which rule refers) + * @rule_type: Rule Type (ingress/egress/VLAN) + * @count: length of the list + * @rule_id: Rule ID that is returned in the receive desc as part of + * add_mirrorrule. + * @mr_list: list of mirrored VLAN IDs to be removed + * @cmd_details: pointer to command details structure or NULL + * @rule_used: Number of rules used in internal switch + * @rule_free: Number of rules free in internal switch + * + * Delete a mirror rule. 
Mirror rules are supported for VEBs/VEPA elements only + **/ +i40e_status i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid, + u16 rule_type, u16 rule_id, u16 count, __le16 *mr_list, + struct i40e_asq_cmd_details *cmd_details, + u16 *rules_used, u16 *rules_free) +{ + /* Rule ID has to be valid except rule_type: INGRESS VLAN mirroring */ + if (rule_type != I40E_AQC_MIRROR_RULE_TYPE_VLAN) { + if (!rule_id) + return I40E_ERR_PARAM; + } else { + /* count and mr_list shall be valid for rule_type INGRESS VLAN + * mirroring. For other rule_type, count and rule_type should + * not matter. + */ + if (count == 0 || !mr_list) + return I40E_ERR_PARAM; + } + + return i40e_mirrorrule_op(hw, i40e_aqc_opc_delete_mirror_rule, sw_seid, + rule_type, rule_id, count, mr_list, + cmd_details, NULL, rules_used, rules_free); +} + +/** * i40e_aq_send_msg_to_vf * @hw: pointer to the hardware structure * @vfid: VF id to send msg @@ -2766,35 +2935,6 @@ i40e_aq_erase_nvm_exit: return status; } -#define I40E_DEV_FUNC_CAP_SWITCH_MODE 0x01 -#define I40E_DEV_FUNC_CAP_MGMT_MODE 0x02 -#define I40E_DEV_FUNC_CAP_NPAR 0x03 -#define I40E_DEV_FUNC_CAP_OS2BMC 0x04 -#define I40E_DEV_FUNC_CAP_VALID_FUNC 0x05 -#define I40E_DEV_FUNC_CAP_SRIOV_1_1 0x12 -#define I40E_DEV_FUNC_CAP_VF 0x13 -#define I40E_DEV_FUNC_CAP_VMDQ 0x14 -#define I40E_DEV_FUNC_CAP_802_1_QBG 0x15 -#define I40E_DEV_FUNC_CAP_802_1_QBH 0x16 -#define I40E_DEV_FUNC_CAP_VSI 0x17 -#define I40E_DEV_FUNC_CAP_DCB 0x18 -#define I40E_DEV_FUNC_CAP_FCOE 0x21 -#define I40E_DEV_FUNC_CAP_ISCSI 0x22 -#define I40E_DEV_FUNC_CAP_RSS 0x40 -#define I40E_DEV_FUNC_CAP_RX_QUEUES 0x41 -#define I40E_DEV_FUNC_CAP_TX_QUEUES 0x42 -#define I40E_DEV_FUNC_CAP_MSIX 0x43 -#define I40E_DEV_FUNC_CAP_MSIX_VF 0x44 -#define I40E_DEV_FUNC_CAP_FLOW_DIRECTOR 0x45 -#define I40E_DEV_FUNC_CAP_IEEE_1588 0x46 -#define I40E_DEV_FUNC_CAP_FLEX10 0xF1 -#define I40E_DEV_FUNC_CAP_CEM 0xF2 -#define I40E_DEV_FUNC_CAP_IWARP 0x51 -#define I40E_DEV_FUNC_CAP_LED 0x61 -#define I40E_DEV_FUNC_CAP_SDP 0x62 -#define I40E_DEV_FUNC_CAP_MDIO 0x63 -#define I40E_DEV_FUNC_CAP_WR_CSR_PROT 0x64 - /** * i40e_parse_discover_capabilities * @hw: pointer to the hw struct @@ -2833,79 +2973,79 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff, major_rev = cap->major_rev; switch (id) { - case I40E_DEV_FUNC_CAP_SWITCH_MODE: + case I40E_AQ_CAP_ID_SWITCH_MODE: p->switch_mode = number; break; - case I40E_DEV_FUNC_CAP_MGMT_MODE: + case I40E_AQ_CAP_ID_MNG_MODE: p->management_mode = number; break; - case I40E_DEV_FUNC_CAP_NPAR: + case I40E_AQ_CAP_ID_NPAR_ACTIVE: p->npar_enable = number; break; - case I40E_DEV_FUNC_CAP_OS2BMC: + case I40E_AQ_CAP_ID_OS2BMC_CAP: p->os2bmc = number; break; - case I40E_DEV_FUNC_CAP_VALID_FUNC: + case I40E_AQ_CAP_ID_FUNCTIONS_VALID: p->valid_functions = number; break; - case I40E_DEV_FUNC_CAP_SRIOV_1_1: + case I40E_AQ_CAP_ID_SRIOV: if (number == 1) p->sr_iov_1_1 = true; break; - case I40E_DEV_FUNC_CAP_VF: + case I40E_AQ_CAP_ID_VF: p->num_vfs = number; p->vf_base_id = logical_id; break; - case I40E_DEV_FUNC_CAP_VMDQ: + case I40E_AQ_CAP_ID_VMDQ: if (number == 1) p->vmdq = true; break; - case I40E_DEV_FUNC_CAP_802_1_QBG: + case I40E_AQ_CAP_ID_8021QBG: if (number == 1) p->evb_802_1_qbg = true; break; - case I40E_DEV_FUNC_CAP_802_1_QBH: + case I40E_AQ_CAP_ID_8021QBR: if (number == 1) p->evb_802_1_qbh = true; break; - case I40E_DEV_FUNC_CAP_VSI: + case I40E_AQ_CAP_ID_VSI: p->num_vsis = number; break; - case I40E_DEV_FUNC_CAP_DCB: + case I40E_AQ_CAP_ID_DCB: if (number == 1) { p->dcb = true; 
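
[Note on the mirror-rule API above] A hedged usage sketch: mirror all ingress traffic of a switch element to a destination VSI, then tear the rule down again. The SEID arguments come from the caller's context, and the assumption that an ALL_INGRESS rule needs no mr_list follows only the parameter checks in i40e_aq_add_mirrorrule() above; whether firmware accepts an empty list is not guaranteed here.

static i40e_status sketch_mirror_all_ingress(struct i40e_hw *hw, u16 sw_seid,
                                             u16 dest_vsi_seid)
{
        u16 rule_id = 0, rules_used, rules_free;
        i40e_status ret;

        /* add: the destination VSI rides in the 'id' argument */
        ret = i40e_aq_add_mirrorrule(hw, sw_seid,
                                     I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS,
                                     dest_vsi_seid, 0, NULL, NULL,
                                     &rule_id, &rules_used, &rules_free);
        if (ret)
                return ret;

        /* delete: feed back the firmware-assigned rule_id */
        return i40e_aq_delete_mirrorrule(hw, sw_seid,
                                         I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS,
                                         rule_id, 0, NULL, NULL,
                                         &rules_used, &rules_free);
}
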
p->enabled_tcmap = logical_id; p->maxtc = phys_id; } break; - case I40E_DEV_FUNC_CAP_FCOE: + case I40E_AQ_CAP_ID_FCOE: if (number == 1) p->fcoe = true; break; - case I40E_DEV_FUNC_CAP_ISCSI: + case I40E_AQ_CAP_ID_ISCSI: if (number == 1) p->iscsi = true; break; - case I40E_DEV_FUNC_CAP_RSS: + case I40E_AQ_CAP_ID_RSS: p->rss = true; p->rss_table_size = number; p->rss_table_entry_width = logical_id; break; - case I40E_DEV_FUNC_CAP_RX_QUEUES: + case I40E_AQ_CAP_ID_RXQ: p->num_rx_qp = number; p->base_queue = phys_id; break; - case I40E_DEV_FUNC_CAP_TX_QUEUES: + case I40E_AQ_CAP_ID_TXQ: p->num_tx_qp = number; p->base_queue = phys_id; break; - case I40E_DEV_FUNC_CAP_MSIX: + case I40E_AQ_CAP_ID_MSIX: p->num_msix_vectors = number; break; - case I40E_DEV_FUNC_CAP_MSIX_VF: + case I40E_AQ_CAP_ID_VF_MSIX: p->num_msix_vectors_vf = number; break; - case I40E_DEV_FUNC_CAP_FLEX10: + case I40E_AQ_CAP_ID_FLEX10: if (major_rev == 1) { if (number == 1) { p->flex10_enable = true; @@ -2921,38 +3061,38 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff, p->flex10_mode = logical_id; p->flex10_status = phys_id; break; - case I40E_DEV_FUNC_CAP_CEM: + case I40E_AQ_CAP_ID_CEM: if (number == 1) p->mgmt_cem = true; break; - case I40E_DEV_FUNC_CAP_IWARP: + case I40E_AQ_CAP_ID_IWARP: if (number == 1) p->iwarp = true; break; - case I40E_DEV_FUNC_CAP_LED: + case I40E_AQ_CAP_ID_LED: if (phys_id < I40E_HW_CAP_MAX_GPIO) p->led[phys_id] = true; break; - case I40E_DEV_FUNC_CAP_SDP: + case I40E_AQ_CAP_ID_SDP: if (phys_id < I40E_HW_CAP_MAX_GPIO) p->sdp[phys_id] = true; break; - case I40E_DEV_FUNC_CAP_MDIO: + case I40E_AQ_CAP_ID_MDIO: if (number == 1) { p->mdio_port_num = phys_id; p->mdio_port_mode = logical_id; } break; - case I40E_DEV_FUNC_CAP_IEEE_1588: + case I40E_AQ_CAP_ID_1588: if (number == 1) p->ieee_1588 = true; break; - case I40E_DEV_FUNC_CAP_FLOW_DIRECTOR: + case I40E_AQ_CAP_ID_FLOW_DIRECTOR: p->fd = true; p->fd_filters_guaranteed = number; p->fd_filters_best_effort = logical_id; break; - case I40E_DEV_FUNC_CAP_WR_CSR_PROT: + case I40E_AQ_CAP_ID_WSR_PROT: p->wr_csr_prot = (u64)number; p->wr_csr_prot |= (u64)logical_id << 32; break; diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.c b/drivers/net/ethernet/intel/i40e/i40e_dcb.c index 2691277c0055..0fab3a9b51d9 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_dcb.c +++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.c @@ -380,17 +380,20 @@ static void i40e_parse_cee_app_tlv(struct i40e_cee_feat_tlv *tlv, { u16 length, typelength, offset = 0; struct i40e_cee_app_prio *app; - u8 i, up, selector; + u8 i; typelength = ntohs(tlv->hdr.typelen); length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >> I40E_LLDP_TLV_LEN_SHIFT); dcbcfg->numapps = length / sizeof(*app); + if (!dcbcfg->numapps) return; for (i = 0; i < dcbcfg->numapps; i++) { + u8 up, selector; + app = (struct i40e_cee_app_prio *)(tlv->tlvinfo + offset); for (up = 0; up < I40E_MAX_USER_PRIORITY; up++) { if (app->prio_map & BIT(up)) @@ -400,13 +403,17 @@ static void i40e_parse_cee_app_tlv(struct i40e_cee_feat_tlv *tlv, /* Get Selector from lower 2 bits, and convert to IEEE */ selector = (app->upper_oui_sel & I40E_CEE_APP_SELECTOR_MASK); - if (selector == I40E_CEE_APP_SEL_ETHTYPE) + switch (selector) { + case I40E_CEE_APP_SEL_ETHTYPE: dcbcfg->app[i].selector = I40E_APP_SEL_ETHTYPE; - else if (selector == I40E_CEE_APP_SEL_TCPIP) + break; + case I40E_CEE_APP_SEL_TCPIP: dcbcfg->app[i].selector = I40E_APP_SEL_TCPIP; - else + break; + default: /* Keep selector as it is for unknown types */ 
dcbcfg->app[i].selector = selector; + } dcbcfg->app[i].protocolid = ntohs(app->protocol); /* Move to next app */ @@ -814,13 +821,15 @@ i40e_status i40e_get_dcb_config(struct i40e_hw *hw) struct i40e_aqc_get_cee_dcb_cfg_resp cee_cfg; struct i40e_aqc_get_cee_dcb_cfg_v1_resp cee_v1_cfg; - /* If Firmware version < v4.33 IEEE only */ - if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) || - (hw->aq.fw_maj_ver < 4)) + /* If Firmware version < v4.33 on X710/XL710, IEEE only */ + if ((hw->mac.type == I40E_MAC_XL710) && + (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) || + (hw->aq.fw_maj_ver < 4))) return i40e_get_ieee_dcb_config(hw); - /* If Firmware version == v4.33 use old CEE struct */ - if ((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver == 33)) { + /* If Firmware version == v4.33 on X710/XL710, use old CEE struct */ + if ((hw->mac.type == I40E_MAC_XL710) && + ((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver == 33))) { ret = i40e_aq_get_cee_dcb_config(hw, &cee_v1_cfg, sizeof(cee_v1_cfg), NULL); if (!ret) { diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index 10744a698d6f..2a44f2e25a26 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2014 Intel Corporation. + * Copyright(c) 2013 - 2016 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -61,257 +61,13 @@ static struct i40e_veb *i40e_dbg_find_veb(struct i40e_pf *pf, int seid) { int i; - if ((seid < I40E_BASE_VEB_SEID) || - (seid > (I40E_BASE_VEB_SEID + I40E_MAX_VEB))) - dev_info(&pf->pdev->dev, "%d: bad seid\n", seid); - else - for (i = 0; i < I40E_MAX_VEB; i++) - if (pf->veb[i] && pf->veb[i]->seid == seid) - return pf->veb[i]; + for (i = 0; i < I40E_MAX_VEB; i++) + if (pf->veb[i] && pf->veb[i]->seid == seid) + return pf->veb[i]; return NULL; } /************************************************************** - * dump - * The dump entry in debugfs is for getting a data snapshow of - * the driver's current configuration and runtime details. - * When the filesystem entry is written, a snapshot is taken. - * When the entry is read, the most recent snapshot data is dumped. - **************************************************************/ -static char *i40e_dbg_dump_buf; -static ssize_t i40e_dbg_dump_data_len; -static ssize_t i40e_dbg_dump_buffer_len; - -/** - * i40e_dbg_dump_read - read the dump data - * @filp: the opened file - * @buffer: where to write the data for the user to read - * @count: the size of the user's buffer - * @ppos: file position offset - **/ -static ssize_t i40e_dbg_dump_read(struct file *filp, char __user *buffer, - size_t count, loff_t *ppos) -{ - int bytes_not_copied; - int len; - - /* is *ppos bigger than the available data? 
*/ - if (*ppos >= i40e_dbg_dump_data_len || !i40e_dbg_dump_buf) - return 0; - - /* be sure to not read beyond the end of available data */ - len = min_t(int, count, (i40e_dbg_dump_data_len - *ppos)); - - bytes_not_copied = copy_to_user(buffer, &i40e_dbg_dump_buf[*ppos], len); - if (bytes_not_copied) - return -EFAULT; - - *ppos += len; - return len; -} - -/** - * i40e_dbg_prep_dump_buf - * @pf: the PF we're working with - * @buflen: the desired buffer length - * - * Return positive if success, 0 if failed - **/ -static int i40e_dbg_prep_dump_buf(struct i40e_pf *pf, int buflen) -{ - /* if not already big enough, prep for re alloc */ - if (i40e_dbg_dump_buffer_len && i40e_dbg_dump_buffer_len < buflen) { - kfree(i40e_dbg_dump_buf); - i40e_dbg_dump_buffer_len = 0; - i40e_dbg_dump_buf = NULL; - } - - /* get a new buffer if needed */ - if (!i40e_dbg_dump_buf) { - i40e_dbg_dump_buf = kzalloc(buflen, GFP_KERNEL); - if (i40e_dbg_dump_buf != NULL) - i40e_dbg_dump_buffer_len = buflen; - } - - return i40e_dbg_dump_buffer_len; -} - -/** - * i40e_dbg_dump_write - trigger a datadump snapshot - * @filp: the opened file - * @buffer: where to find the user's data - * @count: the length of the user's data - * @ppos: file position offset - * - * Any write clears the stats - **/ -static ssize_t i40e_dbg_dump_write(struct file *filp, - const char __user *buffer, - size_t count, loff_t *ppos) -{ - struct i40e_pf *pf = filp->private_data; - bool seid_found = false; - long seid = -1; - int buflen = 0; - int i, ret; - int len; - u8 *p; - - /* don't allow partial writes */ - if (*ppos != 0) - return 0; - - /* decode the SEID given to be dumped */ - ret = kstrtol_from_user(buffer, count, 0, &seid); - - if (ret) { - dev_info(&pf->pdev->dev, "bad seid value\n"); - } else if (seid == 0) { - seid_found = true; - - kfree(i40e_dbg_dump_buf); - i40e_dbg_dump_buffer_len = 0; - i40e_dbg_dump_data_len = 0; - i40e_dbg_dump_buf = NULL; - dev_info(&pf->pdev->dev, "debug buffer freed\n"); - - } else if (seid == pf->pf_seid || seid == 1) { - seid_found = true; - - buflen = sizeof(struct i40e_pf); - buflen += (sizeof(struct i40e_aq_desc) - * (pf->hw.aq.num_arq_entries + pf->hw.aq.num_asq_entries)); - - if (i40e_dbg_prep_dump_buf(pf, buflen)) { - p = i40e_dbg_dump_buf; - - len = sizeof(struct i40e_pf); - memcpy(p, pf, len); - p += len; - - len = (sizeof(struct i40e_aq_desc) - * pf->hw.aq.num_asq_entries); - memcpy(p, pf->hw.aq.asq.desc_buf.va, len); - p += len; - - len = (sizeof(struct i40e_aq_desc) - * pf->hw.aq.num_arq_entries); - memcpy(p, pf->hw.aq.arq.desc_buf.va, len); - p += len; - - i40e_dbg_dump_data_len = buflen; - dev_info(&pf->pdev->dev, - "PF seid %ld dumped %d bytes\n", - seid, (int)i40e_dbg_dump_data_len); - } - } else if (seid >= I40E_BASE_VSI_SEID) { - struct i40e_vsi *vsi = NULL; - struct i40e_mac_filter *f; - int filter_count = 0; - - mutex_lock(&pf->switch_mutex); - vsi = i40e_dbg_find_vsi(pf, seid); - if (!vsi) { - mutex_unlock(&pf->switch_mutex); - goto write_exit; - } - - buflen = sizeof(struct i40e_vsi); - buflen += sizeof(struct i40e_q_vector) * vsi->num_q_vectors; - buflen += sizeof(struct i40e_ring) * 2 * vsi->num_queue_pairs; - buflen += sizeof(struct i40e_tx_buffer) * vsi->num_queue_pairs; - buflen += sizeof(struct i40e_rx_buffer) * vsi->num_queue_pairs; - list_for_each_entry(f, &vsi->mac_filter_list, list) - filter_count++; - buflen += sizeof(struct i40e_mac_filter) * filter_count; - - if (i40e_dbg_prep_dump_buf(pf, buflen)) { - p = i40e_dbg_dump_buf; - seid_found = true; - - len = sizeof(struct 
i40e_vsi); - memcpy(p, vsi, len); - p += len; - - if (vsi->num_q_vectors) { - len = (sizeof(struct i40e_q_vector) - * vsi->num_q_vectors); - memcpy(p, vsi->q_vectors, len); - p += len; - } - - if (vsi->num_queue_pairs) { - len = (sizeof(struct i40e_ring) * - vsi->num_queue_pairs); - memcpy(p, vsi->tx_rings, len); - p += len; - memcpy(p, vsi->rx_rings, len); - p += len; - } - - if (vsi->tx_rings[0]) { - len = sizeof(struct i40e_tx_buffer); - for (i = 0; i < vsi->num_queue_pairs; i++) { - memcpy(p, vsi->tx_rings[i]->tx_bi, len); - p += len; - } - len = sizeof(struct i40e_rx_buffer); - for (i = 0; i < vsi->num_queue_pairs; i++) { - memcpy(p, vsi->rx_rings[i]->rx_bi, len); - p += len; - } - } - - /* macvlan filter list */ - len = sizeof(struct i40e_mac_filter); - list_for_each_entry(f, &vsi->mac_filter_list, list) { - memcpy(p, f, len); - p += len; - } - - i40e_dbg_dump_data_len = buflen; - dev_info(&pf->pdev->dev, - "VSI seid %ld dumped %d bytes\n", - seid, (int)i40e_dbg_dump_data_len); - } - mutex_unlock(&pf->switch_mutex); - } else if (seid >= I40E_BASE_VEB_SEID) { - struct i40e_veb *veb = NULL; - - mutex_lock(&pf->switch_mutex); - veb = i40e_dbg_find_veb(pf, seid); - if (!veb) { - mutex_unlock(&pf->switch_mutex); - goto write_exit; - } - - buflen = sizeof(struct i40e_veb); - if (i40e_dbg_prep_dump_buf(pf, buflen)) { - seid_found = true; - memcpy(i40e_dbg_dump_buf, veb, buflen); - i40e_dbg_dump_data_len = buflen; - dev_info(&pf->pdev->dev, - "VEB seid %ld dumped %d bytes\n", - seid, (int)i40e_dbg_dump_data_len); - } - mutex_unlock(&pf->switch_mutex); - } - -write_exit: - if (!seid_found) - dev_info(&pf->pdev->dev, "unknown seid %ld\n", seid); - - return count; -} - -static const struct file_operations i40e_dbg_dump_fops = { - .owner = THIS_MODULE, - .open = simple_open, - .read = i40e_dbg_dump_read, - .write = i40e_dbg_dump_write, -}; - -/************************************************************** * command * The command entry in debugfs is for giving the driver commands * to be executed - these may be for changing the internal switch @@ -379,19 +135,27 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid) return; } dev_info(&pf->pdev->dev, "vsi seid %d\n", seid); - if (vsi->netdev) - dev_info(&pf->pdev->dev, - " netdev: name = %s\n", - vsi->netdev->name); + if (vsi->netdev) { + struct net_device *nd = vsi->netdev; + + dev_info(&pf->pdev->dev, " netdev: name = %s, state = %lu, flags = 0x%08x\n", + nd->name, nd->state, nd->flags); + dev_info(&pf->pdev->dev, " features = 0x%08lx\n", + (unsigned long int)nd->features); + dev_info(&pf->pdev->dev, " hw_features = 0x%08lx\n", + (unsigned long int)nd->hw_features); + dev_info(&pf->pdev->dev, " vlan_features = 0x%08lx\n", + (unsigned long int)nd->vlan_features); + } if (vsi->active_vlans) dev_info(&pf->pdev->dev, " vlgrp: & = %p\n", vsi->active_vlans); dev_info(&pf->pdev->dev, - " netdev_registered = %i, current_netdev_flags = 0x%04x, state = %li flags = 0x%08lx\n", - vsi->netdev_registered, - vsi->current_netdev_flags, vsi->state, vsi->flags); + " state = %li flags = 0x%08lx, netdev_registered = %i, current_netdev_flags = 0x%04x\n", + vsi->state, vsi->flags, + vsi->netdev_registered, vsi->current_netdev_flags); if (vsi == pf->vsi[pf->lan_vsi]) - dev_info(&pf->pdev->dev, "MAC address: %pM SAN MAC: %pM Port MAC: %pM\n", + dev_info(&pf->pdev->dev, " MAC address: %pM SAN MAC: %pM Port MAC: %pM\n", pf->hw.mac.addr, pf->hw.mac.san_addr, pf->hw.mac.port_addr); @@ -511,7 +275,7 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int 
seid) rx_ring->dtype); dev_info(&pf->pdev->dev, " rx_rings[%i]: hsplit = %d, next_to_use = %d, next_to_clean = %d, ring_active = %i\n", - i, rx_ring->hsplit, + i, ring_is_ps_enabled(rx_ring), rx_ring->next_to_use, rx_ring->next_to_clean, rx_ring->ring_active); @@ -526,6 +290,11 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid) rx_ring->rx_stats.alloc_page_failed, rx_ring->rx_stats.alloc_buff_failed); dev_info(&pf->pdev->dev, + " rx_rings[%i]: rx_stats: realloc_count = %lld, page_reuse_count = %lld\n", + i, + rx_ring->rx_stats.realloc_count, + rx_ring->rx_stats.page_reuse_count); + dev_info(&pf->pdev->dev, " rx_rings[%i]: size = %i, dma = 0x%08lx\n", i, rx_ring->size, (unsigned long int)rx_ring->dma); @@ -557,8 +326,8 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid) " tx_rings[%i]: dtype = %d\n", i, tx_ring->dtype); dev_info(&pf->pdev->dev, - " tx_rings[%i]: hsplit = %d, next_to_use = %d, next_to_clean = %d, ring_active = %i\n", - i, tx_ring->hsplit, + " tx_rings[%i]: next_to_use = %d, next_to_clean = %d, ring_active = %i\n", + i, tx_ring->next_to_use, tx_ring->next_to_clean, tx_ring->ring_active); @@ -815,20 +584,20 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n, if (!is_rx_ring) { txd = I40E_TX_DESC(ring, i); dev_info(&pf->pdev->dev, - " d[%03i] = 0x%016llx 0x%016llx\n", + " d[%03x] = 0x%016llx 0x%016llx\n", i, txd->buffer_addr, txd->cmd_type_offset_bsz); } else if (sizeof(union i40e_rx_desc) == sizeof(union i40e_16byte_rx_desc)) { rxd = I40E_RX_DESC(ring, i); dev_info(&pf->pdev->dev, - " d[%03i] = 0x%016llx 0x%016llx\n", + " d[%03x] = 0x%016llx 0x%016llx\n", i, rxd->read.pkt_addr, rxd->read.hdr_addr); } else { rxd = I40E_RX_DESC(ring, i); dev_info(&pf->pdev->dev, - " d[%03i] = 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n", + " d[%03x] = 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n", i, rxd->read.pkt_addr, rxd->read.hdr_addr, rxd->read.rsvd1, rxd->read.rsvd2); @@ -843,20 +612,20 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n, if (!is_rx_ring) { txd = I40E_TX_DESC(ring, desc_n); dev_info(&pf->pdev->dev, - "vsi = %02i tx ring = %02i d[%03i] = 0x%016llx 0x%016llx\n", + "vsi = %02i tx ring = %02i d[%03x] = 0x%016llx 0x%016llx\n", vsi_seid, ring_id, desc_n, txd->buffer_addr, txd->cmd_type_offset_bsz); } else if (sizeof(union i40e_rx_desc) == sizeof(union i40e_16byte_rx_desc)) { rxd = I40E_RX_DESC(ring, desc_n); dev_info(&pf->pdev->dev, - "vsi = %02i rx ring = %02i d[%03i] = 0x%016llx 0x%016llx\n", + "vsi = %02i rx ring = %02i d[%03x] = 0x%016llx 0x%016llx\n", vsi_seid, ring_id, desc_n, rxd->read.pkt_addr, rxd->read.hdr_addr); } else { rxd = I40E_RX_DESC(ring, desc_n); dev_info(&pf->pdev->dev, - "vsi = %02i rx ring = %02i d[%03i] = 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n", + "vsi = %02i rx ring = %02i d[%03x] = 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n", vsi_seid, ring_id, desc_n, rxd->read.pkt_addr, rxd->read.hdr_addr, rxd->read.rsvd1, rxd->read.rsvd2); @@ -918,12 +687,6 @@ static void i40e_dbg_dump_veb_seid(struct i40e_pf *pf, int seid) { struct i40e_veb *veb; - if ((seid < I40E_BASE_VEB_SEID) || - (seid >= (I40E_MAX_VEB + I40E_BASE_VEB_SEID))) { - dev_info(&pf->pdev->dev, "%d: bad seid\n", seid); - return; - } - veb = i40e_dbg_find_veb(pf, seid); if (!veb) { dev_info(&pf->pdev->dev, "can't find veb %d\n", seid); @@ -2202,11 +1965,6 @@ void i40e_dbg_pf_init(struct i40e_pf *pf) if (!pfile) goto create_failed; - pfile = debugfs_create_file("dump", 0600, pf->i40e_dbg_pf, pf, - 
&i40e_dbg_dump_fops); - if (!pfile) - goto create_failed; - pfile = debugfs_create_file("netdev_ops", 0600, pf->i40e_dbg_pf, pf, &i40e_dbg_netdev_ops_fops); if (!pfile) @@ -2227,9 +1985,6 @@ void i40e_dbg_pf_exit(struct i40e_pf *pf) { debugfs_remove_recursive(pf->i40e_dbg_pf); pf->i40e_dbg_pf = NULL; - - kfree(i40e_dbg_dump_buf); - i40e_dbg_dump_buf = NULL; } /** diff --git a/drivers/net/ethernet/intel/i40e/i40e_devids.h b/drivers/net/ethernet/intel/i40e/i40e_devids.h index c601ca4a610c..99257fcd1ef4 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_devids.h +++ b/drivers/net/ethernet/intel/i40e/i40e_devids.h @@ -30,7 +30,6 @@ /* Device IDs */ #define I40E_DEV_ID_SFP_XL710 0x1572 #define I40E_DEV_ID_QEMU 0x1574 -#define I40E_DEV_ID_KX_A 0x157F #define I40E_DEV_ID_KX_B 0x1580 #define I40E_DEV_ID_KX_C 0x1581 #define I40E_DEV_ID_QSFP_A 0x1583 @@ -40,13 +39,11 @@ #define I40E_DEV_ID_20G_KR2 0x1587 #define I40E_DEV_ID_20G_KR2_A 0x1588 #define I40E_DEV_ID_10G_BASE_T4 0x1589 -#define I40E_DEV_ID_VF 0x154C -#define I40E_DEV_ID_VF_HV 0x1571 +#define I40E_DEV_ID_KX_X722 0x37CE +#define I40E_DEV_ID_QSFP_X722 0x37CF #define I40E_DEV_ID_SFP_X722 0x37D0 #define I40E_DEV_ID_1G_BASE_T_X722 0x37D1 #define I40E_DEV_ID_10G_BASE_T_X722 0x37D2 -#define I40E_DEV_ID_X722_VF 0x37CD -#define I40E_DEV_ID_X722_VF_HV 0x37D9 #define i40e_is_40G_device(d) ((d) == I40E_DEV_ID_QSFP_A || \ (d) == I40E_DEV_ID_QSFP_B || \ diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 29d5833e24a3..9fc7546bfc9b 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2015 Intel Corporation. + * Copyright(c) 2013 - 2016 Intel Corporation. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -89,6 +89,9 @@ static const struct i40e_stats i40e_gstrings_misc_stats[] = { I40E_VSI_STAT("rx_unknown_protocol", eth_stats.rx_unknown_protocol), I40E_VSI_STAT("tx_linearize", tx_linearize), I40E_VSI_STAT("tx_force_wb", tx_force_wb), + I40E_VSI_STAT("tx_lost_interrupt", tx_lost_interrupt), + I40E_VSI_STAT("rx_alloc_fail", rx_buf_failed), + I40E_VSI_STAT("rx_pg_alloc_fail", rx_page_failed), }; /* These PF_STATs might look like duplicates of some NETDEV_STATs, @@ -143,6 +146,7 @@ static struct i40e_stats i40e_gstrings_stats[] = { I40E_PF_STAT("rx_oversize", stats.rx_oversize), I40E_PF_STAT("rx_jabber", stats.rx_jabber), I40E_PF_STAT("VF_admin_queue_requests", vf_aq_requests), + I40E_PF_STAT("arq_overflows", arq_overflows), I40E_PF_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared), I40E_PF_STAT("fdir_flush_cnt", fd_flush_cnt), I40E_PF_STAT("fdir_atr_match", stats.fd_atr_match), @@ -232,6 +236,7 @@ static const char i40e_priv_flags_strings[][ETH_GSTRING_LEN] = { "flow-director-atr", "veb-stats", "packet-split", + "hw-atr-eviction", }; #define I40E_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40e_priv_flags_strings) @@ -340,7 +345,7 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw, SUPPORTED_1000baseT_Full; if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB) ecmd->advertising |= ADVERTISED_1000baseT_Full; - if (pf->hw.mac.type == I40E_MAC_X722) { + if (pf->flags & I40E_FLAG_100M_SGMII_CAPABLE) { ecmd->supported |= SUPPORTED_100baseT_Full; if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB) @@ -411,6 +416,10 @@ static void i40e_get_settings_link_down(struct i40e_hw *hw, if (pf->hw.mac.type == I40E_MAC_X722) { ecmd->supported |= SUPPORTED_100baseT_Full; ecmd->advertising |= ADVERTISED_100baseT_Full; + if (pf->flags & I40E_FLAG_100M_SGMII_CAPABLE) { + ecmd->supported |= SUPPORTED_100baseT_Full; + ecmd->advertising |= ADVERTISED_100baseT_Full; + } } } if (phy_types & I40E_CAP_PHY_TYPE_XAUI || @@ -996,16 +1005,19 @@ static int i40e_get_eeprom(struct net_device *netdev, /* check for NVMUpdate access method */ magic = hw->vendor_id | (hw->device_id << 16); if (eeprom->magic && eeprom->magic != magic) { - struct i40e_nvm_access *cmd; - int errno; + struct i40e_nvm_access *cmd = (struct i40e_nvm_access *)eeprom; + int errno = 0; /* make sure it is the right magic for NVMUpdate */ if ((eeprom->magic >> 16) != hw->device_id) - return -EINVAL; + errno = -EINVAL; + else if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) || + test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state)) + errno = -EBUSY; + else + ret_val = i40e_nvmupd_command(hw, cmd, bytes, &errno); - cmd = (struct i40e_nvm_access *)eeprom; - ret_val = i40e_nvmupd_command(hw, cmd, bytes, &errno); - if (ret_val && (hw->debug_mask & I40E_DEBUG_NVM)) + if ((errno || ret_val) && (hw->debug_mask & I40E_DEBUG_NVM)) dev_info(&pf->pdev->dev, "NVMUpdate read failed err=%d status=0x%x errno=%d module=%d offset=0x%x size=%d\n", ret_val, hw->aq.asq_last_status, errno, @@ -1089,27 +1101,25 @@ static int i40e_set_eeprom(struct net_device *netdev, struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_hw *hw = &np->vsi->back->hw; struct i40e_pf *pf = np->vsi->back; - struct i40e_nvm_access *cmd; + struct i40e_nvm_access *cmd = (struct i40e_nvm_access *)eeprom; int ret_val = 0; - int errno; + int errno = 0; u32 magic; /* normal ethtool set_eeprom is not supported */ magic = hw->vendor_id | 
(hw->device_id << 16); if (eeprom->magic == magic) - return -EOPNOTSUPP; - + errno = -EOPNOTSUPP; /* check for NVMUpdate access method */ - if (!eeprom->magic || (eeprom->magic >> 16) != hw->device_id) - return -EINVAL; - - if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) || - test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state)) - return -EBUSY; + else if (!eeprom->magic || (eeprom->magic >> 16) != hw->device_id) + errno = -EINVAL; + else if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) || + test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state)) + errno = -EBUSY; + else + ret_val = i40e_nvmupd_command(hw, cmd, bytes, &errno); - cmd = (struct i40e_nvm_access *)eeprom; - ret_val = i40e_nvmupd_command(hw, cmd, bytes, &errno); - if (ret_val && (hw->debug_mask & I40E_DEBUG_NVM)) + if ((errno || ret_val) && (hw->debug_mask & I40E_DEBUG_NVM)) dev_info(&pf->pdev->dev, "NVMUpdate write failed err=%d status=0x%x errno=%d module=%d offset=0x%x size=%d\n", ret_val, hw->aq.asq_last_status, errno, @@ -2166,9 +2176,12 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc) case TCP_V4_FLOW: switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { case 0: - hena &= ~BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP); - break; + return -EINVAL; case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + if (pf->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) + hena |= + BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK); + hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP); break; default: @@ -2178,9 +2191,12 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc) case TCP_V6_FLOW: switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { case 0: - hena &= ~BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP); - break; + return -EINVAL; case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + if (pf->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) + hena |= + BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK); + hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP); break; default: @@ -2190,10 +2206,13 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc) case UDP_V4_FLOW: switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { case 0: - hena &= ~(BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | - BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4)); - break; + return -EINVAL; case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + if (pf->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) + hena |= + BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | + BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP); + hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4)); break; @@ -2204,10 +2223,13 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc) case UDP_V6_FLOW: switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { case 0: - hena &= ~(BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | - BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6)); - break; + return -EINVAL; case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + if (pf->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) + hena |= + BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | + BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP); + hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6)); break; @@ -2712,6 +2734,8 @@ static u32 i40e_get_priv_flags(struct net_device *dev) I40E_PRIV_FLAGS_VEB_STATS : 0; ret_flags |= pf->flags & I40E_FLAG_RX_PS_ENABLED ? I40E_PRIV_FLAGS_PS : 0; + ret_flags |= pf->auto_disable_flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE ? 
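
[Note on the eeprom 'magic' gate used by both paths above] It packs the PCI IDs with the vendor in the low 16 bits and the device in the high 16. A worked example for an SFP_XL710 part (device ID 0x1572 per i40e_devids.h, Intel vendor ID 0x8086):

/* driver side:
 *      magic = hw->vendor_id | (hw->device_id << 16)
 *            = 0x8086 | (0x1572 << 16) = 0x15728086
 *
 * get_eeprom: eeprom->magic of 0 or exactly 0x15728086 takes the normal
 * ethtool read path; any other value whose upper 16 bits are 0x1572 is
 * routed to i40e_nvmupd_command(), everything else fails with -EINVAL
 * (or -EBUSY while a reset is pending).
 */
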
+ 0 : I40E_PRIV_FLAGS_HW_ATR_EVICT; return ret_flags; } @@ -2763,10 +2787,21 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags) pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED; } - if (flags & I40E_PRIV_FLAGS_VEB_STATS) + if ((flags & I40E_PRIV_FLAGS_VEB_STATS) && + !(pf->flags & I40E_FLAG_VEB_STATS_ENABLED)) { pf->flags |= I40E_FLAG_VEB_STATS_ENABLED; - else + reset_required = true; + } else if (!(flags & I40E_PRIV_FLAGS_VEB_STATS) && + (pf->flags & I40E_FLAG_VEB_STATS_ENABLED)) { pf->flags &= ~I40E_FLAG_VEB_STATS_ENABLED; + reset_required = true; + } + + if ((flags & I40E_PRIV_FLAGS_HW_ATR_EVICT) && + (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE)) + pf->auto_disable_flags &= ~I40E_FLAG_HW_ATR_EVICT_CAPABLE; + else + pf->auto_disable_flags |= I40E_FLAG_HW_ATR_EVICT_CAPABLE; /* if needed, issue reset to cause things to take effect */ if (reset_required) diff --git a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c index 579a46ca82df..7c66ce416ec7 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c +++ b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c @@ -1457,7 +1457,7 @@ static const struct net_device_ops i40e_fcoe_netdev_ops = { .ndo_tx_timeout = i40e_tx_timeout, .ndo_vlan_rx_add_vid = i40e_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = i40e_vlan_rx_kill_vid, - .ndo_setup_tc = i40e_setup_tc, + .ndo_setup_tc = __i40e_setup_tc, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = i40e_netpoll, diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 1598fb31477a..2f2b2d714f63 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2015 Intel Corporation. + * Copyright(c) 2013 - 2016 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -24,6 +24,10 @@ * ******************************************************************************/ +#include <linux/etherdevice.h> +#include <linux/of_net.h> +#include <linux/pci.h> + /* Local includes */ #include "i40e.h" #include "i40e_diag.h" @@ -42,7 +46,7 @@ static const char i40e_driver_string[] = #define DRV_VERSION_MAJOR 1 #define DRV_VERSION_MINOR 4 -#define DRV_VERSION_BUILD 8 +#define DRV_VERSION_BUILD 15 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ __stringify(DRV_VERSION_MINOR) "." 
\ __stringify(DRV_VERSION_BUILD) DRV_KERN @@ -73,7 +77,6 @@ static int i40e_veb_get_bw_info(struct i40e_veb *veb); static const struct pci_device_id i40e_pci_tbl[] = { {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_XL710), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_QEMU), 0}, - {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_A), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_B), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_C), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0}, @@ -82,6 +85,8 @@ static const struct pci_device_id i40e_pci_tbl[] = { {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T4), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0}, + {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_X722), 0}, + {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_X722), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_X722), 0}, @@ -102,6 +107,8 @@ MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_VERSION); +static struct workqueue_struct *i40e_wq; + /** * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code * @hw: pointer to the HW structure @@ -287,7 +294,7 @@ static void i40e_service_event_schedule(struct i40e_pf *pf) if (!test_bit(__I40E_DOWN, &pf->state) && !test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) && !test_and_set_bit(__I40E_SERVICE_SCHED, &pf->state)) - schedule_work(&pf->service_task); + queue_work(i40e_wq, &pf->service_task); } /** @@ -761,7 +768,7 @@ static void i40e_update_fcoe_stats(struct i40e_vsi *vsi) if (vsi->type != I40E_VSI_FCOE) return; - idx = (pf->pf_seid - I40E_BASE_PF_SEID) + I40E_FCOE_PF_STAT_OFFSET; + idx = hw->pf_id + I40E_FCOE_PF_STAT_OFFSET; fs = &vsi->fcoe_stats; ofs = &vsi->fcoe_stats_offsets; @@ -812,6 +819,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi) struct i40e_eth_stats *oes; struct i40e_eth_stats *es; /* device's eth stats */ u32 tx_restart, tx_busy; + u64 tx_lost_interrupt; struct i40e_ring *p; u32 rx_page, rx_buf; u64 bytes, packets; @@ -837,6 +845,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi) rx_b = rx_p = 0; tx_b = tx_p = 0; tx_restart = tx_busy = tx_linearize = tx_force_wb = 0; + tx_lost_interrupt = 0; rx_page = 0; rx_buf = 0; rcu_read_lock(); @@ -855,6 +864,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi) tx_busy += p->tx_stats.tx_busy; tx_linearize += p->tx_stats.tx_linearize; tx_force_wb += p->tx_stats.tx_force_wb; + tx_lost_interrupt += p->tx_stats.tx_lost_interrupt; /* Rx queue is part of the same block as Tx queue */ p = &p[1]; @@ -873,6 +883,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi) vsi->tx_busy = tx_busy; vsi->tx_linearize = tx_linearize; vsi->tx_force_wb = tx_force_wb; + vsi->tx_lost_interrupt = tx_lost_interrupt; vsi->rx_page_failed = rx_page; vsi->rx_buf_failed = rx_buf; @@ -1360,7 +1371,7 @@ struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi, f->changed = true; INIT_LIST_HEAD(&f->list); - list_add(&f->list, &vsi->mac_filter_list); + list_add_tail(&f->list, &vsi->mac_filter_list); } /* increment counter and add a new flag if needed */ @@ -1530,7 +1541,11 @@ static int i40e_set_mac(struct net_device *netdev, void *p) ether_addr_copy(netdev->dev_addr, addr->sa_data); - return i40e_sync_vsi_filters(vsi); + /* schedule our worker thread which will take care of + * applying the new filter changes + */ + i40e_service_event_schedule(vsi->back); + return 0; } /** @@ -1754,6 +1769,11 @@ bottom_of_search_loop: vsi->flags |= 
I40E_VSI_FLAG_FILTER_CHANGED; vsi->back->flags |= I40E_FLAG_FILTER_SYNC; } + + /* schedule our worker thread which will take care of + * applying the new filter changes + */ + i40e_service_event_schedule(vsi->back); } /** @@ -1925,7 +1945,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) sizeof(struct i40e_aqc_remove_macvlan_element_data); del_list_size = filter_list_len * sizeof(struct i40e_aqc_remove_macvlan_element_data); - del_list = kzalloc(del_list_size, GFP_KERNEL); + del_list = kzalloc(del_list_size, GFP_ATOMIC); if (!del_list) { i40e_cleanup_add_list(&tmp_add_list); @@ -2003,7 +2023,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) sizeof(struct i40e_aqc_add_macvlan_element_data), add_list_size = filter_list_len * sizeof(struct i40e_aqc_add_macvlan_element_data); - add_list = kzalloc(add_list_size, GFP_KERNEL); + add_list = kzalloc(add_list_size, GFP_ATOMIC); if (!add_list) { /* Purge element from temporary lists */ i40e_cleanup_add_list(&tmp_add_list); @@ -2102,7 +2122,9 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) || test_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state)); - if (vsi->type == I40E_VSI_MAIN && pf->lan_veb != I40E_NO_VEB) { + if ((vsi->type == I40E_VSI_MAIN) && + (pf->lan_veb != I40E_NO_VEB) && + !(pf->flags & I40E_FLAG_MFP_ENABLED)) { /* set defport ON for Main VSI instead of true promisc * this way we will get all unicast/multicast and VLAN * promisc behavior but will not get VF or VMDq traffic @@ -2152,6 +2174,10 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) } } out: + /* if something went wrong then set the changed flag so we try again */ + if (retval) + vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; + clear_bit(__I40E_CONFIG_BUSY, &vsi->state); return retval; } @@ -3237,14 +3263,15 @@ void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf) /** * i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0 * @pf: board private structure + * @clearpba: true when all pending interrupt events should be cleared **/ -void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf) +void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf, bool clearpba) { struct i40e_hw *hw = &pf->hw; u32 val; val = I40E_PFINT_DYN_CTL0_INTENA_MASK | - I40E_PFINT_DYN_CTL0_CLEARPBA_MASK | + (clearpba ? 
I40E_PFINT_DYN_CTL0_CLEARPBA_MASK : 0) | (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT); wr32(hw, I40E_PFINT_DYN_CTL0, val); @@ -3252,22 +3279,6 @@ void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf) } /** - * i40e_irq_dynamic_disable - Disable default interrupt generation settings - * @vsi: pointer to a vsi - * @vector: disable a particular Hw Interrupt vector - **/ -void i40e_irq_dynamic_disable(struct i40e_vsi *vsi, int vector) -{ - struct i40e_pf *pf = vsi->back; - struct i40e_hw *hw = &pf->hw; - u32 val; - - val = I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT; - wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val); - i40e_flush(hw); -} - -/** * i40e_msix_clean_rings - MSIX mode Interrupt Handler * @irq: interrupt number * @data: pointer to a q_vector @@ -3392,7 +3403,7 @@ static int i40e_vsi_enable_irq(struct i40e_vsi *vsi) for (i = 0; i < vsi->num_q_vectors; i++) i40e_irq_dynamic_enable(vsi, i); } else { - i40e_irq_dynamic_enable_icr0(pf); + i40e_irq_dynamic_enable_icr0(pf, true); } i40e_flush(&pf->hw); @@ -3451,16 +3462,12 @@ static irqreturn_t i40e_intr(int irq, void *data) struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; struct i40e_q_vector *q_vector = vsi->q_vectors[0]; - /* temporarily disable queue cause for NAPI processing */ - u32 qval = rd32(hw, I40E_QINT_RQCTL(0)); - - qval &= ~I40E_QINT_RQCTL_CAUSE_ENA_MASK; - wr32(hw, I40E_QINT_RQCTL(0), qval); - - qval = rd32(hw, I40E_QINT_TQCTL(0)); - qval &= ~I40E_QINT_TQCTL_CAUSE_ENA_MASK; - wr32(hw, I40E_QINT_TQCTL(0), qval); - + /* We do not have a way to disarm Queue causes while leaving + * interrupt enabled for all other causes; ideally the + * interrupt should be disabled while we are in NAPI, but + * this is not a performance path and napi_schedule() + * can deal with rescheduling. + */ if (!test_bit(__I40E_DOWN, &pf->state)) napi_schedule_irqoff(&q_vector->napi); } @@ -3468,6 +3475,7 @@ static irqreturn_t i40e_intr(int irq, void *data) if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) { ena_mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK; set_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state); + i40e_debug(&pf->hw, I40E_DEBUG_NVM, "AdminQ event\n"); } if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) { @@ -3538,7 +3546,7 @@ enable_intr: wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask); if (!test_bit(__I40E_DOWN, &pf->state)) { i40e_service_event_schedule(pf); - i40e_irq_dynamic_enable_icr0(pf); + i40e_irq_dynamic_enable_icr0(pf, false); } return ret; @@ -3742,7 +3750,7 @@ static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename) #ifdef CONFIG_NET_POLL_CONTROLLER /** - * i40e_netpoll - A Polling 'interrupt'handler + * i40e_netpoll - A Polling 'interrupt' handler * @netdev: network interface device structure * * This is used by netconsole to send skbs without having to re-enable @@ -4344,7 +4352,7 @@ static void i40e_detect_recover_hung_queue(int q_idx, struct i40e_vsi *vsi) { struct i40e_ring *tx_ring = NULL; struct i40e_pf *pf; - u32 head, val, tx_pending; + u32 head, val, tx_pending_hw; int i; pf = vsi->back; @@ -4370,16 +4378,9 @@ static void i40e_detect_recover_hung_queue(int q_idx, struct i40e_vsi *vsi) else val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0); - /* Bail out if interrupts are disabled because napi_poll - * execution in-progress or will get scheduled soon. - * napi_poll cleans TX and RX queues and updates 'next_to_clean'.
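For context, the hung-queue check here boils down to ring head/tail arithmetic: how many descriptors the consumer has not yet caught up on. A minimal standalone sketch of that computation, mirroring the i40e_get_tx_pending() change later in this patch (names simplified; not the driver's exact helper):

static unsigned int ring_pending(unsigned int head, unsigned int tail,
                                 unsigned int count)
{
        /* tail is where software last posted a descriptor; head is how
         * far the consumer has progressed. Account for ring wrap.
         */
        if (head != tail)
                return (head < tail) ? tail - head : (tail + count - head);
        return 0;
}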
- */ - if (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK)) - return; - head = i40e_get_head(tx_ring); - tx_pending = i40e_get_tx_pending(tx_ring); + tx_pending_hw = i40e_get_tx_pending(tx_ring, false); /* HW is done executing descriptors, updated HEAD write back, * but SW hasn't processed those descriptors. If interrupt is @@ -4387,12 +4388,12 @@ static void i40e_detect_recover_hung_queue(int q_idx, struct i40e_vsi *vsi) * dev_watchdog detecting timeout on those netdev_queue, * hence proactively trigger SW interrupt. */ - if (tx_pending) { + if (tx_pending_hw && (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK))) { /* NAPI Poll didn't run and clear since it was set */ if (test_and_clear_bit(I40E_Q_VECTOR_HUNG_DETECT, &tx_ring->q_vector->hung_detected)) { - netdev_info(vsi->netdev, "VSI_seid %d, Hung TX queue %d, tx_pending: %d, NTC:0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x\n", - vsi->seid, q_idx, tx_pending, + netdev_info(vsi->netdev, "VSI_seid %d, Hung TX queue %d, tx_pending_hw: %d, NTC:0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x\n", + vsi->seid, q_idx, tx_pending_hw, tx_ring->next_to_clean, head, tx_ring->next_to_use, readl(tx_ring->tail)); @@ -4405,6 +4406,17 @@ static void i40e_detect_recover_hung_queue(int q_idx, struct i40e_vsi *vsi) &tx_ring->q_vector->hung_detected); } } + + /* This is the case where we have interrupts missing, + * so the tx_pending in HW will most likely be 0, but we + * will have tx_pending in SW since the WB happened but the + * interrupt got lost. + */ + if ((!tx_pending_hw) && i40e_get_tx_pending(tx_ring, true) && + (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK))) { + if (napi_reschedule(&tx_ring->q_vector->napi)) + tx_ring->tx_stats.tx_lost_interrupt++; + } } /** @@ -5008,8 +5020,7 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf) int err = 0; /* Do not enable DCB for SW1 and SW2 images even if the FW is capable */ - if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) || - (pf->hw.aq.fw_maj_ver < 4)) + if (pf->flags & I40E_FLAG_NO_DCB_SUPPORT) goto out; /* Get the initial DCB configuration */ @@ -5241,11 +5252,7 @@ void i40e_down(struct i40e_vsi *vsi) * @netdev: net device to configure * @tc: number of traffic classes to enable **/ -#ifdef I40E_FCOE -int i40e_setup_tc(struct net_device *netdev, u8 tc) -#else static int i40e_setup_tc(struct net_device *netdev, u8 tc) -#endif { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; @@ -5298,6 +5305,19 @@ exit: return ret; } +#ifdef I40E_FCOE +int __i40e_setup_tc(struct net_device *netdev, u32 handle, __be16 proto, + struct tc_to_netdev *tc) +#else +static int __i40e_setup_tc(struct net_device *netdev, u32 handle, __be16 proto, + struct tc_to_netdev *tc) +#endif +{ + if (handle != TC_H_ROOT || tc->type != TC_SETUP_MQPRIO) + return -EINVAL; + return i40e_setup_tc(netdev, tc->tc); +} + /** * i40e_open - Called when a network interface is made active * @netdev: network interface device structure @@ -5340,7 +5360,8 @@ int i40e_open(struct net_device *netdev) vxlan_get_rx_port(netdev); #endif #ifdef CONFIG_I40E_GENEVE - geneve_get_rx_port(netdev); + if (pf->flags & I40E_FLAG_GENEVE_OFFLOAD_CAPABLE) + geneve_get_rx_port(netdev); #endif return 0; @@ -6236,6 +6257,7 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf) if (hw->debug_mask & I40E_DEBUG_AQ) dev_info(&pf->pdev->dev, "ARQ Overflow Error detected\n"); val &= ~I40E_PF_ARQLEN_ARQOVFL_MASK; + pf->arq_overflows++; } if (val & I40E_PF_ARQLEN_ARQCRIT_MASK) { if (hw->debug_mask & I40E_DEBUG_AQ) @@ -6311,7 +6333,9 @@ static void 
i40e_clean_adminq_subtask(struct i40e_pf *pf) case i40e_aqc_opc_nvm_erase: case i40e_aqc_opc_nvm_update: case i40e_aqc_opc_oem_post_update: - i40e_debug(&pf->hw, I40E_DEBUG_NVM, "ARQ NVM operation completed\n"); + i40e_debug(&pf->hw, I40E_DEBUG_NVM, + "ARQ NVM operation 0x%04x completed\n", + opcode); break; default: dev_info(&pf->pdev->dev, @@ -6795,12 +6819,12 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit) if (ret) goto end_core_reset; - /* driver is only interested in link up/down and module qualification - * reports from firmware + /* The driver only wants link up/down and module qualification + * reports from firmware. Note the negative logic. */ ret = i40e_aq_set_phy_int_mask(&pf->hw, - I40E_AQ_EVENT_LINK_UPDOWN | - I40E_AQ_EVENT_MODULE_QUAL_FAIL, NULL); + ~(I40E_AQ_EVENT_LINK_UPDOWN | + I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL); if (ret) dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n", i40e_stat_str(&pf->hw, ret), @@ -6881,8 +6905,7 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit) wr32(hw, I40E_REG_MSS, val); } - if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) || - (pf->hw.aq.fw_maj_ver < 4)) { + if (pf->flags & I40E_FLAG_RESTART_AUTONEG) { msleep(75); ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL); if (ret) @@ -7103,15 +7126,14 @@ static void i40e_service_task(struct work_struct *work) } i40e_detect_recover_hung(pf); + i40e_sync_filters_subtask(pf); i40e_reset_subtask(pf); i40e_handle_mdd_event(pf); i40e_vc_process_vflr_event(pf); i40e_watchdog_subtask(pf); i40e_fdir_reinit_subtask(pf); i40e_sync_filters_subtask(pf); -#if IS_ENABLED(CONFIG_VXLAN) || IS_ENABLED(CONFIG_GENEVE) i40e_sync_udp_filters_subtask(pf); -#endif i40e_clean_adminq_subtask(pf); i40e_service_event_complete(pf); @@ -7452,8 +7474,6 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi) tx_ring->dcb_tc = 0; if (vsi->back->flags & I40E_FLAG_WB_ON_ITR_CAPABLE) tx_ring->flags = I40E_TXR_FLAGS_WB_ON_ITR; - if (vsi->back->flags & I40E_FLAG_OUTER_UDP_CSUM_CAPABLE) - tx_ring->flags |= I40E_TXR_FLAGS_OUTER_UDP_CSUM; vsi->tx_rings[i] = tx_ring; rx_ring = &tx_ring[1]; @@ -7846,7 +7866,7 @@ static int i40e_setup_misc_vector(struct i40e_pf *pf) i40e_flush(hw); - i40e_irq_dynamic_enable_icr0(pf); + i40e_irq_dynamic_enable_icr0(pf, true); return err; } @@ -7930,6 +7950,52 @@ static int i40e_vsi_config_rss(struct i40e_vsi *vsi) } /** + * i40e_get_rss_aq - Get RSS keys and lut by using AQ commands + * @vsi: Pointer to vsi structure + * @seed: Buffer to store the hash keys + * @lut: Buffer to store the lookup table entries + * @lut_size: Size of buffer to store the lookup table entries + * + * Return 0 on success, negative on failure + */ +static int i40e_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed, + u8 *lut, u16 lut_size) +{ + struct i40e_pf *pf = vsi->back; + struct i40e_hw *hw = &pf->hw; + int ret = 0; + + if (seed) { + ret = i40e_aq_get_rss_key(hw, vsi->id, + (struct i40e_aqc_get_set_rss_key_data *)seed); + if (ret) { + dev_info(&pf->pdev->dev, + "Cannot get RSS key, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, + pf->hw.aq.asq_last_status)); + return ret; + } + } + + if (lut) { + bool pf_lut = vsi->type == I40E_VSI_MAIN ?
true : false; + + ret = i40e_aq_get_rss_lut(hw, vsi->id, pf_lut, lut, lut_size); + if (ret) { + dev_info(&pf->pdev->dev, + "Cannot get RSS lut, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, + pf->hw.aq.asq_last_status)); + return ret; + } + } + + return ret; +} + +/** * i40e_config_rss_reg - Configure RSS keys and lut by writing registers * @vsi: Pointer to vsi structure * @seed: RSS hash seed @@ -8031,7 +8097,12 @@ int i40e_config_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size) */ int i40e_get_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size) { - return i40e_get_rss_reg(vsi, seed, lut, lut_size); + struct i40e_pf *pf = vsi->back; + + if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) + return i40e_get_rss_aq(vsi, seed, lut, lut_size); + else + return i40e_get_rss_reg(vsi, seed, lut, lut_size); } /** @@ -8361,6 +8432,26 @@ static int i40e_sw_init(struct i40e_pf *pf) pf->hw.func_caps.fd_filters_best_effort; } + if (i40e_is_mac_710(&pf->hw) && + (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) || + (pf->hw.aq.fw_maj_ver < 4))) { + pf->flags |= I40E_FLAG_RESTART_AUTONEG; + /* No DCB support for FW < v4.33 */ + pf->flags |= I40E_FLAG_NO_DCB_SUPPORT; + } + + /* Disable FW LLDP if FW < v4.3 */ + if (i40e_is_mac_710(&pf->hw) && + (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) || + (pf->hw.aq.fw_maj_ver < 4))) + pf->flags |= I40E_FLAG_STOP_FW_LLDP; + + /* Use the FW Set LLDP MIB API if FW > v4.40 */ + if (i40e_is_mac_710(&pf->hw) && + (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver >= 40)) || + (pf->hw.aq.fw_maj_ver >= 5))) + pf->flags |= I40E_FLAG_USE_SET_LLDP_MIB; + if (pf->hw.func_caps.vmdq) { pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI; pf->flags |= I40E_FLAG_VMDQ_ENABLED; @@ -8387,8 +8478,19 @@ static int i40e_sw_init(struct i40e_pf *pf) I40E_FLAG_OUTER_UDP_CSUM_CAPABLE | I40E_FLAG_WB_ON_ITR_CAPABLE | I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE | + I40E_FLAG_100M_SGMII_CAPABLE | + I40E_FLAG_USE_SET_LLDP_MIB | I40E_FLAG_GENEVE_OFFLOAD_CAPABLE; + } else if ((pf->hw.aq.api_maj_ver > 1) || + ((pf->hw.aq.api_maj_ver == 1) && + (pf->hw.aq.api_min_ver > 4))) { + /* Supported in FW API version higher than 1.4 */ + pf->flags |= I40E_FLAG_GENEVE_OFFLOAD_CAPABLE; + pf->auto_disable_flags = I40E_FLAG_HW_ATR_EVICT_CAPABLE; + } else { + pf->auto_disable_flags = I40E_FLAG_HW_ATR_EVICT_CAPABLE; } + pf->eeprom_version = 0xDEAD; pf->lan_veb = I40E_NO_VEB; pf->lan_vsi = I40E_NO_VSI; @@ -8507,6 +8609,8 @@ static u8 i40e_get_udp_port_idx(struct i40e_pf *pf, __be16 port) } #endif + +#if IS_ENABLED(CONFIG_VXLAN) /** * i40e_add_vxlan_port - Get notifications about VXLAN ports that come up * @netdev: This physical port's netdev @@ -8516,16 +8620,12 @@ static u8 i40e_get_udp_port_idx(struct i40e_pf *pf, __be16 port) static void i40e_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family, __be16 port) { -#if IS_ENABLED(CONFIG_VXLAN) struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; u8 next_idx; u8 idx; - if (sa_family == AF_INET6) - return; - idx = i40e_get_udp_port_idx(pf, port); /* Check if port already exists */ @@ -8549,7 +8649,6 @@ static void i40e_add_vxlan_port(struct net_device *netdev, pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_VXLAN; pf->pending_udp_bitmap |= BIT_ULL(next_idx); pf->flags |= I40E_FLAG_UDP_FILTER_SYNC; -#endif } /** @@ -8561,15 +8660,11 @@ static void i40e_add_vxlan_port(struct net_device *netdev, static void i40e_del_vxlan_port(struct 
net_device *netdev, sa_family_t sa_family, __be16 port) { -#if IS_ENABLED(CONFIG_VXLAN) struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; u8 idx; - if (sa_family == AF_INET6) - return; - idx = i40e_get_udp_port_idx(pf, port); /* Check if port already exists */ @@ -8584,9 +8679,10 @@ static void i40e_del_vxlan_port(struct net_device *netdev, netdev_warn(netdev, "vxlan port %d was not found, not deleting\n", ntohs(port)); } -#endif } +#endif +#if IS_ENABLED(CONFIG_GENEVE) /** * i40e_add_geneve_port - Get notifications about GENEVE ports that come up * @netdev: This physical port's netdev @@ -8596,14 +8692,13 @@ static void i40e_del_vxlan_port(struct net_device *netdev, static void i40e_add_geneve_port(struct net_device *netdev, sa_family_t sa_family, __be16 port) { -#if IS_ENABLED(CONFIG_GENEVE) struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; u8 next_idx; u8 idx; - if (sa_family == AF_INET6) + if (!(pf->flags & I40E_FLAG_GENEVE_OFFLOAD_CAPABLE)) return; idx = i40e_get_udp_port_idx(pf, port); @@ -8631,7 +8726,6 @@ static void i40e_add_geneve_port(struct net_device *netdev, pf->flags |= I40E_FLAG_UDP_FILTER_SYNC; dev_info(&pf->pdev->dev, "adding geneve port %d\n", ntohs(port)); -#endif } /** @@ -8643,13 +8737,12 @@ static void i40e_add_geneve_port(struct net_device *netdev, static void i40e_del_geneve_port(struct net_device *netdev, sa_family_t sa_family, __be16 port) { -#if IS_ENABLED(CONFIG_GENEVE) struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; u8 idx; - if (sa_family == AF_INET6) + if (!(pf->flags & I40E_FLAG_GENEVE_OFFLOAD_CAPABLE)) return; idx = i40e_get_udp_port_idx(pf, port); @@ -8669,8 +8762,8 @@ static void i40e_del_geneve_port(struct net_device *netdev, netdev_warn(netdev, "geneve port %d was not found, not deleting\n", ntohs(port)); } -#endif } +#endif static int i40e_get_phys_port_id(struct net_device *netdev, struct netdev_phys_item_id *ppid) @@ -8887,7 +8980,7 @@ static const struct net_device_ops i40e_netdev_ops = { #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = i40e_netpoll, #endif - .ndo_setup_tc = i40e_setup_tc, + .ndo_setup_tc = __i40e_setup_tc, #ifdef I40E_FCOE .ndo_fcoe_enable = i40e_fcoe_enable, .ndo_fcoe_disable = i40e_fcoe_disable, @@ -8939,11 +9032,15 @@ static int i40e_config_netdev(struct i40e_vsi *vsi) np = netdev_priv(netdev); np->vsi = vsi; - netdev->hw_enc_features |= NETIF_F_IP_CSUM | - NETIF_F_RXCSUM | - NETIF_F_GSO_UDP_TUNNEL | - NETIF_F_GSO_GRE | - NETIF_F_TSO; + netdev->hw_enc_features |= NETIF_F_IP_CSUM | + NETIF_F_IPV6_CSUM | + NETIF_F_TSO | + NETIF_F_TSO6 | + NETIF_F_TSO_ECN | + NETIF_F_GSO_GRE | + NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_GSO_UDP_TUNNEL_CSUM | + 0; netdev->features = NETIF_F_SG | NETIF_F_IP_CSUM | @@ -8964,6 +9061,8 @@ static int i40e_config_netdev(struct i40e_vsi *vsi) if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) netdev->features |= NETIF_F_NTUPLE; + if (pf->flags & I40E_FLAG_OUTER_UDP_CSUM_CAPABLE) + netdev->features |= NETIF_F_GSO_UDP_TUNNEL_CSUM; /* copy netdev features into list of user selectable features */ netdev->hw_features |= netdev->features; @@ -9522,6 +9621,44 @@ err_vsi: } /** + * i40e_macaddr_init - explicitly write the mac address filters. + * + * @vsi: pointer to the vsi. 
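The VXLAN and GENEVE callbacks above share one bookkeeping pattern: the offloaded port is recorded in a small fixed table (pf->udp_ports[]) and a pending bitmap is set so the service task pushes the change to hardware later. A hedged sketch of that scheme with simplified, hypothetical names:

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/types.h>

#define MAX_UDP_PORTS 16

struct udp_port_table {
        __be16 port[MAX_UDP_PORTS];     /* 0 means the slot is free */
        u64 pending;                    /* slots awaiting a HW sync */
};

static int udp_port_add(struct udp_port_table *t, __be16 port)
{
        int i, free_idx = -1;

        for (i = 0; i < MAX_UDP_PORTS; i++) {
                if (t->port[i] == port)
                        return -EEXIST;         /* already offloaded */
                if (free_idx < 0 && !t->port[i])
                        free_idx = i;
        }
        if (free_idx < 0)
                return -ENOSPC;                 /* table is full */

        t->port[free_idx] = port;
        t->pending |= BIT_ULL(free_idx);        /* service task syncs it */
        return free_idx;
}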
+ * @macaddr: the MAC address + * + * This is needed when the macaddr has been obtained by other + * means than the default, e.g., from Open Firmware or IDPROM. + * Returns 0 on success, negative on failure + **/ +static int i40e_macaddr_init(struct i40e_vsi *vsi, u8 *macaddr) +{ + int ret; + struct i40e_aqc_add_macvlan_element_data element; + + ret = i40e_aq_mac_address_write(&vsi->back->hw, + I40E_AQC_WRITE_TYPE_LAA_WOL, + macaddr, NULL); + if (ret) { + dev_info(&vsi->back->pdev->dev, + "Addr change for VSI failed: %d\n", ret); + return -EADDRNOTAVAIL; + } + + memset(&element, 0, sizeof(element)); + ether_addr_copy(element.mac_addr, macaddr); + element.flags = cpu_to_le16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH); + ret = i40e_aq_add_macvlan(&vsi->back->hw, vsi->seid, &element, 1, NULL); + if (ret) { + dev_info(&vsi->back->pdev->dev, + "add filter failed err %s aq_err %s\n", + i40e_stat_str(&vsi->back->hw, ret), + i40e_aq_str(&vsi->back->hw, + vsi->back->hw.aq.asq_last_status)); + } + return ret; +} + +/** * i40e_vsi_setup - Set up a VSI by a given type * @pf: board private structure * @type: VSI type @@ -9645,6 +9782,17 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type, switch (vsi->type) { /* setup the netdev if needed */ case I40E_VSI_MAIN: + /* Apply relevant filters if a platform-specific mac + * address was selected. + */ + if (!!(pf->flags & I40E_FLAG_PF_MAC)) { + ret = i40e_macaddr_init(vsi, pf->hw.mac.addr); + if (ret) { + dev_warn(&pf->pdev->dev, + "could not set up macaddr; err %d\n", + ret); + } + } case I40E_VSI_VMDQ2: case I40E_VSI_FCOE: ret = i40e_config_netdev(vsi); @@ -9923,13 +10071,13 @@ static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi) { struct i40e_pf *pf = veb->pf; bool is_default = veb->pf->cur_promisc; - bool is_cloud = false; + bool enable_stats = !!(pf->flags & I40E_FLAG_VEB_STATS_ENABLED); int ret; /* get a VEB from the hardware */ ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi->seid, veb->enabled_tc, is_default, - is_cloud, &veb->seid, NULL); + &veb->seid, enable_stats, NULL); if (ret) { dev_info(&pf->pdev->dev, "couldn't add VEB, err %s aq_err %s\n", @@ -10474,6 +10622,24 @@ static void i40e_print_features(struct i40e_pf *pf) } /** + * i40e_get_platform_mac_addr - get platform-specific MAC address + * + * @pdev: PCI device information struct + * @pf: board private structure + * + * Look up the MAC address in Open Firmware on systems that support it, + * and use IDPROM on SPARC if no OF address is found. On return, the + * I40E_FLAG_PF_MAC will be set in pf->flags if a platform-specific value + * has been selected.
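For illustration, the probe-time selection this helper enables reduces to a few lines; eth_platform_get_mac_address() is the real kernel API, while pick_mac() and its arguments are hypothetical:

#include <linux/etherdevice.h>
#include <linux/pci.h>

/* Prefer a platform-provided MAC (an OF "mac-address" property, or the
 * IDPROM on SPARC) and fall back to the address read from the NIC NVM.
 */
static void pick_mac(struct pci_dev *pdev, const u8 *nvm_addr, u8 *out)
{
        if (!eth_platform_get_mac_address(&pdev->dev, out))
                return;                 /* platform override found */
        ether_addr_copy(out, nvm_addr); /* keep the NVM address */
}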
+ **/ +static void i40e_get_platform_mac_addr(struct pci_dev *pdev, struct i40e_pf *pf) +{ + pf->flags &= ~I40E_FLAG_PF_MAC; + if (!eth_platform_get_mac_address(&pdev->dev, pf->hw.mac.addr)) + pf->flags |= I40E_FLAG_PF_MAC; +} + +/** * i40e_probe - Device initialization routine * @pdev: PCI device information struct * @ent: entry in i40e_pci_tbl @@ -10493,7 +10659,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) u16 wol_nvm_bits; u16 link_status; int err; - u32 len; u32 val; u32 i; u8 set_fc_aq_fail; @@ -10676,13 +10841,14 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) * Ignore error return codes because if it was already disabled via * hardware settings this will fail */ - if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) || - (pf->hw.aq.fw_maj_ver < 4)) { + if (pf->flags & I40E_FLAG_STOP_FW_LLDP) { dev_info(&pdev->dev, "Stopping firmware LLDP agent.\n"); i40e_aq_stop_lldp(hw, true, NULL); } i40e_get_mac_addr(hw, hw->mac.addr); + /* allow a platform config to override the HW addr */ + i40e_get_platform_mac_addr(pdev, pf); if (!is_valid_ether_addr(hw->mac.addr)) { dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr); err = -EIO; @@ -10750,8 +10916,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) pf->num_alloc_vsi = pf->hw.func_caps.num_vsis; /* Set up the *vsi struct and our local tracking of the MAIN PF vsi. */ - len = sizeof(struct i40e_vsi *) * pf->num_alloc_vsi; - pf->vsi = kzalloc(len, GFP_KERNEL); + pf->vsi = kcalloc(pf->num_alloc_vsi, sizeof(struct i40e_vsi *), + GFP_KERNEL); if (!pf->vsi) { err = -ENOMEM; goto err_switch_setup; @@ -10798,12 +10964,12 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } } - /* driver is only interested in link up/down and module qualification - * reports from firmware + /* The driver only wants link up/down and module qualification + * reports from firmware. Note the negative logic. 
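The "negative logic" note is worth spelling out: judging by this patch, a bit left set in the mask suppresses that event, so the driver passes the complement of the events it wants to keep. Schematically (a fragment, assuming hw points at the device's i40e_hw):

u16 wanted = I40E_AQ_EVENT_LINK_UPDOWN | I40E_AQ_EVENT_MODULE_QUAL_FAIL;

/* Every bit still set in ~wanted masks that event source off, leaving
 * only link transitions and module-qualification failures enabled.
 */
ret = i40e_aq_set_phy_int_mask(hw, ~wanted, NULL);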
*/ err = i40e_aq_set_phy_int_mask(&pf->hw, - I40E_AQ_EVENT_LINK_UPDOWN | - I40E_AQ_EVENT_MODULE_QUAL_FAIL, NULL); + ~(I40E_AQ_EVENT_LINK_UPDOWN | + I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL); if (err) dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n", i40e_stat_str(&pf->hw, err), @@ -10820,8 +10986,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) wr32(hw, I40E_REG_MSS, val); } - if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) || - (pf->hw.aq.fw_maj_ver < 4)) { + if (pf->flags & I40E_FLAG_RESTART_AUTONEG) { msleep(75); err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL); if (err) @@ -10855,8 +11020,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) && (pf->flags & I40E_FLAG_MSIX_ENABLED) && !test_bit(__I40E_BAD_EEPROM, &pf->state)) { - u32 val; - /* disable link interrupts for VFs */ val = rd32(hw, I40E_PFGEN_PORTMDIO_NUM); val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK; @@ -11057,8 +11220,8 @@ static void i40e_remove(struct pci_dev *pdev) i40e_vsi_release(pf->vsi[pf->lan_vsi]); /* shutdown and destroy the HMC */ - if (pf->hw.hmc.hmc_obj) { - ret_code = i40e_shutdown_lan_hmc(&pf->hw); + if (hw->hmc.hmc_obj) { + ret_code = i40e_shutdown_lan_hmc(hw); if (ret_code) dev_warn(&pdev->dev, "Failed to destroy the HMC resources: %d\n", @@ -11066,7 +11229,7 @@ static void i40e_remove(struct pci_dev *pdev) } /* shutdown the adminq */ - ret_code = i40e_shutdown_adminq(&pf->hw); + ret_code = i40e_shutdown_adminq(hw); if (ret_code) dev_warn(&pdev->dev, "Failed to destroy the Admin Queue resources: %d\n", @@ -11094,7 +11257,7 @@ static void i40e_remove(struct pci_dev *pdev) kfree(pf->qp_pile); kfree(pf->vsi); - iounmap(pf->hw.hw_addr); + iounmap(hw->hw_addr); kfree(pf); pci_release_selected_regions(pdev, pci_select_bars(pdev, IORESOURCE_MEM)); @@ -11329,6 +11492,16 @@ static int __init i40e_init_module(void) i40e_driver_string, i40e_driver_version_str); pr_info("%s: %s\n", i40e_driver_name, i40e_copyright); + /* we will see if single thread per module is enough for now, + * it can't be any worse than using the system workqueue which + * was already single threaded + */ + i40e_wq = create_singlethread_workqueue(i40e_driver_name); + if (!i40e_wq) { + pr_err("%s: Failed to create workqueue\n", i40e_driver_name); + return -ENOMEM; + } + i40e_dbg_init(); return pci_register_driver(&i40e_driver); } @@ -11343,6 +11516,7 @@ module_init(i40e_init_module); static void __exit i40e_exit_module(void) { pci_unregister_driver(&i40e_driver); + destroy_workqueue(i40e_wq); i40e_dbg_exit(); } module_exit(i40e_exit_module); diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c index 6100cdd9ad13..5730f8091e1b 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c +++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c @@ -693,10 +693,11 @@ i40e_status i40e_nvmupd_command(struct i40e_hw *hw, /* early check for status command and debug msgs */ upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno); - i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_hold %d\n", + i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_hold %d cmd 0x%08x config 0x%08x offset 0x%08x data_size 0x%08x\n", i40e_nvm_update_state_str[upd_cmd], hw->nvmupd_state, - hw->aq.nvm_release_on_done); + hw->aq.nvm_release_on_done, + cmd->command, cmd->config, cmd->offset, cmd->data_size); if (upd_cmd == I40E_NVMUPD_INVALID) { *perrno = -EFAULT; diff --git 
a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h index bb9d583e5416..e8deabde82b4 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h +++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2015 Intel Corporation. + * Copyright(c) 2013 - 2016 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -127,6 +127,9 @@ i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw, u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details); i40e_status i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw, u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw, + u16 seid, bool enable, + struct i40e_asq_cmd_details *cmd_details); i40e_status i40e_aq_get_vsi_params(struct i40e_hw *hw, struct i40e_vsi_context *vsi_ctx, struct i40e_asq_cmd_details *cmd_details); @@ -135,8 +138,8 @@ i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw, struct i40e_asq_cmd_details *cmd_details); i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid, u16 downlink_seid, u8 enabled_tc, - bool default_port, bool enable_l2_filtering, - u16 *pveb_seid, + bool default_port, u16 *pveb_seid, + bool enable_stats, struct i40e_asq_cmd_details *cmd_details); i40e_status i40e_aq_get_veb_parameters(struct i40e_hw *hw, u16 veb_seid, u16 *switch_id, bool *floating, @@ -149,6 +152,15 @@ i40e_status i40e_aq_add_macvlan(struct i40e_hw *hw, u16 vsi_id, i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 vsi_id, struct i40e_aqc_remove_macvlan_element_data *mv_list, u16 count, struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid, + u16 rule_type, u16 dest_vsi, u16 count, __le16 *mr_list, + struct i40e_asq_cmd_details *cmd_details, + u16 *rule_id, u16 *rules_used, u16 *rules_free); +i40e_status i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid, + u16 rule_type, u16 rule_id, u16 count, __le16 *mr_list, + struct i40e_asq_cmd_details *cmd_details, + u16 *rules_used, u16 *rules_free); + i40e_status i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen, struct i40e_asq_cmd_details *cmd_details); diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index e9e9a37ee274..1d3afa7dda18 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2014 Intel Corporation. + * Copyright(c) 2013 - 2016 Intel Corporation. 
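The new mirror-rule prototypes above take a switch SEID, a rule type, a destination VSI and a list of sources. A hedged usage sketch built only from those signatures; the helper name is invented, and the rule-type value and SEIDs are illustrative (the exact constants live in the AQ definitions):

static i40e_status mirror_vsi_to_vsi(struct i40e_hw *hw, u16 sw_seid,
                                     u16 rule_type, u16 src_vsi_seid,
                                     u16 dst_vsi_seid, u16 *rule_id)
{
        __le16 mr_list[1] = { cpu_to_le16(src_vsi_seid) };
        u16 rules_used = 0, rules_free = 0;

        /* One-entry source list; the firmware reports back how many
         * rule slots were consumed and how many remain.
         */
        return i40e_aq_add_mirrorrule(hw, sw_seid, rule_type,
                                      dst_vsi_seid, 1, mr_list, NULL,
                                      rule_id, &rules_used, &rules_free);
}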
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -610,15 +610,19 @@ void i40e_free_tx_resources(struct i40e_ring *tx_ring) /** * i40e_get_tx_pending - how many tx descriptors not processed * @tx_ring: the ring of descriptors + * @in_sw: is tx_pending being checked in SW or HW * * Since there is no access to the ring head register * in XL710, we need to use our local copies **/ -u32 i40e_get_tx_pending(struct i40e_ring *ring) +u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw) { u32 head, tail; - head = i40e_get_head(ring); + if (!in_sw) + head = i40e_get_head(ring); + else + head = ring->next_to_clean; tail = readl(ring->tail); if (head != tail) @@ -741,7 +745,7 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget) * them to be written back in case we stay in NAPI. * In this mode on X722 we do not enable Interrupt. */ - j = i40e_get_tx_pending(tx_ring); + j = i40e_get_tx_pending(tx_ring, false); if (budget && ((j / (WB_STRIDE + 1)) == 0) && (j != 0) && @@ -774,29 +778,48 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget) } /** - * i40e_force_wb - Arm hardware to do a wb on noncache aligned descriptors + * i40e_enable_wb_on_itr - Arm hardware to do a wb, interrupts are not enabled * @vsi: the VSI we care about - * @q_vector: the vector on which to force writeback + * @q_vector: the vector on which to enable writeback * **/ -void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector) +static void i40e_enable_wb_on_itr(struct i40e_vsi *vsi, + struct i40e_q_vector *q_vector) { u16 flags = q_vector->tx.ring[0].flags; + u32 val; - if (flags & I40E_TXR_FLAGS_WB_ON_ITR) { - u32 val; + if (!(flags & I40E_TXR_FLAGS_WB_ON_ITR)) + return; - if (q_vector->arm_wb_state) - return; + if (q_vector->arm_wb_state) + return; - val = I40E_PFINT_DYN_CTLN_WB_ON_ITR_MASK; + if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) { + val = I40E_PFINT_DYN_CTLN_WB_ON_ITR_MASK | + I40E_PFINT_DYN_CTLN_ITR_INDX_MASK; /* set noitr */ wr32(&vsi->back->hw, - I40E_PFINT_DYN_CTLN(q_vector->v_idx + - vsi->base_vector - 1), + I40E_PFINT_DYN_CTLN(q_vector->v_idx + vsi->base_vector - 1), val); - q_vector->arm_wb_state = true; - } else if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) { + } else { + val = I40E_PFINT_DYN_CTL0_WB_ON_ITR_MASK | + I40E_PFINT_DYN_CTL0_ITR_INDX_MASK; /* set noitr */ + + wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val); + } + q_vector->arm_wb_state = true; +} + +/** + * i40e_force_wb - Issue SW Interrupt so HW does a wb + * @vsi: the VSI we care about + * @q_vector: the vector on which to force writeback + * + **/ +void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector) +{ + if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) { u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK | I40E_PFINT_DYN_CTLN_ITR_INDX_MASK | /* set noitr */ I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK | @@ -1041,7 +1064,7 @@ void i40e_clean_rx_ring(struct i40e_ring *rx_ring) if (rx_bi->page_dma) { dma_unmap_page(dev, rx_bi->page_dma, - PAGE_SIZE / 2, + PAGE_SIZE, DMA_FROM_DEVICE); rx_bi->page_dma = 0; } @@ -1176,16 +1199,19 @@ static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val) * i40e_alloc_rx_buffers_ps - Replace used receive buffers; packet split * @rx_ring: ring to place buffers on * @cleaned_count: number of buffers to replace + * + * Returns true if any errors on allocation **/ -void i40e_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count) +bool 
i40e_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count) { u16 i = rx_ring->next_to_use; union i40e_rx_desc *rx_desc; struct i40e_rx_buffer *bi; + const int current_node = numa_node_id(); /* do nothing if no valid netdev defined */ if (!rx_ring->netdev || !cleaned_count) - return; + return false; while (cleaned_count--) { rx_desc = I40E_RX_DESC(rx_ring, i); @@ -1193,56 +1219,79 @@ void i40e_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count) if (bi->skb) /* desc is in use */ goto no_buffers; + + /* If we've been moved to a different NUMA node, release the + * page so we can get a new one on the current node. + */ + if (bi->page && page_to_nid(bi->page) != current_node) { + dma_unmap_page(rx_ring->dev, + bi->page_dma, + PAGE_SIZE, + DMA_FROM_DEVICE); + __free_page(bi->page); + bi->page = NULL; + bi->page_dma = 0; + rx_ring->rx_stats.realloc_count++; + } else if (bi->page) { + rx_ring->rx_stats.page_reuse_count++; + } + if (!bi->page) { bi->page = alloc_page(GFP_ATOMIC); if (!bi->page) { rx_ring->rx_stats.alloc_page_failed++; goto no_buffers; } - } - - if (!bi->page_dma) { - /* use a half page if we're re-using */ - bi->page_offset ^= PAGE_SIZE / 2; bi->page_dma = dma_map_page(rx_ring->dev, bi->page, - bi->page_offset, - PAGE_SIZE / 2, + 0, + PAGE_SIZE, DMA_FROM_DEVICE); - if (dma_mapping_error(rx_ring->dev, - bi->page_dma)) { + if (dma_mapping_error(rx_ring->dev, bi->page_dma)) { rx_ring->rx_stats.alloc_page_failed++; + __free_page(bi->page); + bi->page = NULL; bi->page_dma = 0; + bi->page_offset = 0; goto no_buffers; } + bi->page_offset = 0; } - dma_sync_single_range_for_device(rx_ring->dev, - bi->dma, - 0, - rx_ring->rx_hdr_len, - DMA_FROM_DEVICE); /* Refresh the desc even if buffer_addrs didn't change * because each write-back erases this info. 
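The reuse test the refill path relies on is tiny: a mapped page is kept only while it is local to the node doing the refill. A sketch distilled from the code above:

#include <linux/mm.h>
#include <linux/topology.h>

/* A remote page gets unmapped and freed so that the next allocation
 * lands on the local NUMA node; a local one is recycled as-is.
 */
static bool rx_page_reusable(struct page *page)
{
        return page && page_to_nid(page) == numa_node_id();
}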
*/ - rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma); + rx_desc->read.pkt_addr = + cpu_to_le64(bi->page_dma + bi->page_offset); rx_desc->read.hdr_addr = cpu_to_le64(bi->dma); i++; if (i == rx_ring->count) i = 0; } + if (rx_ring->next_to_use != i) + i40e_release_rx_desc(rx_ring, i); + + return false; + no_buffers: if (rx_ring->next_to_use != i) i40e_release_rx_desc(rx_ring, i); + + /* make sure to come back via polling to try again after + * allocation failure + */ + return true; } /** * i40e_alloc_rx_buffers_1buf - Replace used receive buffers; single buffer * @rx_ring: ring to place buffers on * @cleaned_count: number of buffers to replace + * + * Returns true if any errors on allocation **/ -void i40e_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count) +bool i40e_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count) { u16 i = rx_ring->next_to_use; union i40e_rx_desc *rx_desc; @@ -1251,7 +1300,7 @@ void i40e_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count) /* do nothing if no valid netdev defined */ if (!rx_ring->netdev || !cleaned_count) - return; + return false; while (cleaned_count--) { rx_desc = I40E_RX_DESC(rx_ring, i); @@ -1259,8 +1308,10 @@ void i40e_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count) skb = bi->skb; if (!skb) { - skb = netdev_alloc_skb_ip_align(rx_ring->netdev, - rx_ring->rx_buf_len); + skb = __netdev_alloc_skb_ip_align(rx_ring->netdev, + rx_ring->rx_buf_len, + GFP_ATOMIC | + __GFP_NOWARN); if (!skb) { rx_ring->rx_stats.alloc_buff_failed++; goto no_buffers; @@ -1278,6 +1329,8 @@ void i40e_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count) if (dma_mapping_error(rx_ring->dev, bi->dma)) { rx_ring->rx_stats.alloc_buff_failed++; bi->dma = 0; + dev_kfree_skb(bi->skb); + bi->skb = NULL; goto no_buffers; } } @@ -1289,9 +1342,19 @@ void i40e_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count) i = 0; } + if (rx_ring->next_to_use != i) + i40e_release_rx_desc(rx_ring, i); + + return false; + no_buffers: if (rx_ring->next_to_use != i) i40e_release_rx_desc(rx_ring, i); + + /* make sure to come back via polling to try again after + * allocation failure + */ + return true; } /** @@ -1326,16 +1389,7 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi, u16 rx_ptype) { struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(rx_ptype); - bool ipv4 = false, ipv6 = false; - bool ipv4_tunnel, ipv6_tunnel; - __wsum rx_udp_csum; - struct iphdr *iph; - __sum16 csum; - - ipv4_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT4_MAC_PAY3) && - (rx_ptype <= I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4); - ipv6_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT6_MAC_PAY3) && - (rx_ptype <= I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4); + bool ipv4, ipv6, ipv4_tunnel, ipv6_tunnel; skb->ip_summed = CHECKSUM_NONE; @@ -1351,12 +1405,10 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi, if (!(decoded.known && decoded.outer_ip)) return; - if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP && - decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4) - ipv4 = true; - else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP && - decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6) - ipv6 = true; + ipv4 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) && + (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4); + ipv6 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) && + (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6); if (ipv4 && (rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) | @@ -1380,37 +1432,17 @@ static inline void 
i40e_rx_checksum(struct i40e_vsi *vsi, if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT)) return; - /* If VXLAN/GENEVE traffic has an outer UDPv4 checksum we need to check - * it in the driver, hardware does not do it for us. - * Since L3L4P bit was set we assume a valid IHL value (>=5) - * so the total length of IPv4 header is IHL*4 bytes - * The UDP_0 bit *may* bet set if the *inner* header is UDP + /* The hardware supported by this driver does not validate outer + * checksums for tunneled VXLAN or GENEVE frames. I don't agree + * with it, but the specification states that you "MAY validate"; + * it doesn't make this a hard requirement, so if we have validated + * the inner checksum we report CHECKSUM_UNNECESSARY. */ - if (!(vsi->back->flags & I40E_FLAG_OUTER_UDP_CSUM_CAPABLE) && - (ipv4_tunnel)) { - skb->transport_header = skb->mac_header + - sizeof(struct ethhdr) + - (ip_hdr(skb)->ihl * 4); - - /* Add 4 bytes for VLAN tagged packets */ - skb->transport_header += (skb->protocol == htons(ETH_P_8021Q) || - skb->protocol == htons(ETH_P_8021AD)) - ? VLAN_HLEN : 0; - - if ((ip_hdr(skb)->protocol == IPPROTO_UDP) && - (udp_hdr(skb)->check != 0)) { - rx_udp_csum = udp_csum(skb); - iph = ip_hdr(skb); - csum = csum_tcpudp_magic( - iph->saddr, iph->daddr, - (skb->len - skb_transport_offset(skb)), - IPPROTO_UDP, rx_udp_csum); - - if (udp_hdr(skb)->check != csum) - goto checksum_fail; - - } /* else its GRE and so no outer UDP header */ - } + + ipv4_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT4_MAC_PAY3) && + (rx_ptype <= I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4); + ipv6_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT6_MAC_PAY3) && + (rx_ptype <= I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4); skb->ip_summed = CHECKSUM_UNNECESSARY; skb->csum_level = ipv4_tunnel || ipv6_tunnel; @@ -1422,31 +1454,12 @@ checksum_fail: } /** - * i40e_rx_hash - returns the hash value from the Rx descriptor - * @ring: descriptor ring - * @rx_desc: specific descriptor - **/ -static inline u32 i40e_rx_hash(struct i40e_ring *ring, - union i40e_rx_desc *rx_desc) -{ - const __le64 rss_mask = - cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH << - I40E_RX_DESC_STATUS_FLTSTAT_SHIFT); - - if ((ring->netdev->features & NETIF_F_RXHASH) && - (rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) - return le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss); - else - return 0; -} - -/** - * i40e_ptype_to_hash - get a hash type + * i40e_ptype_to_htype - get a hash type * @ptype: the ptype value from the descriptor * * Returns a hash type to be used by skb_set_hash **/ -static inline enum pkt_hash_types i40e_ptype_to_hash(u8 ptype) +static inline enum pkt_hash_types i40e_ptype_to_htype(u8 ptype) { struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype); @@ -1464,24 +1477,49 @@ static inline enum pkt_hash_types i40e_ptype_to_hash(u8 ptype) } /** + * i40e_rx_hash - set the hash value in the skb + * @ring: descriptor ring + * @rx_desc: specific descriptor + **/ +static inline void i40e_rx_hash(struct i40e_ring *ring, + union i40e_rx_desc *rx_desc, + struct sk_buff *skb, + u8 rx_ptype) +{ + u32 hash; + const __le64 rss_mask = + cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH << + I40E_RX_DESC_STATUS_FLTSTAT_SHIFT); + + if (!(ring->netdev->features & NETIF_F_RXHASH)) + return; + + if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) { + hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss); + skb_set_hash(skb, hash, i40e_ptype_to_htype(rx_ptype)); + } +} + +/** * i40e_clean_rx_irq_ps - Reclaim resources after receive; packet split *
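The two assignments closing the checksum path encode the tunnel policy from the comment above: with CHECKSUM_UNNECESSARY, skb->csum_level tells the stack how many nested checksums beyond the outermost are covered. In sketch form (is_tunnel is illustrative):

/* csum_level 0: one checksum verified. For a VXLAN/GENEVE frame whose
 * inner checksum was verified, report level 1 so the stack treats the
 * inner transport checksum as covered too.
 */
skb->ip_summed = CHECKSUM_UNNECESSARY;
skb->csum_level = is_tunnel ? 1 : 0;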
@rx_ring: rx ring to clean * @budget: how many cleans we're allowed * * Returns true if there's any budget left (e.g. the clean is finished) **/ -static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) +static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, const int budget) { unsigned int total_rx_bytes = 0, total_rx_packets = 0; u16 rx_packet_len, rx_header_len, rx_sph, rx_hbo; u16 cleaned_count = I40E_DESC_UNUSED(rx_ring); - const int current_node = numa_mem_id(); struct i40e_vsi *vsi = rx_ring->vsi; u16 i = rx_ring->next_to_clean; union i40e_rx_desc *rx_desc; u32 rx_error, rx_status; + bool failure = false; u8 rx_ptype; u64 qword; + u32 copysize; if (budget <= 0) return 0; @@ -1492,7 +1530,9 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) u16 vlan_tag; /* return some buffers to hardware, one at a time is too slow */ if (cleaned_count >= I40E_RX_BUFFER_WRITE) { - i40e_alloc_rx_buffers_ps(rx_ring, cleaned_count); + failure = failure || + i40e_alloc_rx_buffers_ps(rx_ring, + cleaned_count); cleaned_count = 0; } @@ -1510,6 +1550,12 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) * DD bit is set. */ dma_rmb(); + /* sync header buffer for reading */ + dma_sync_single_range_for_cpu(rx_ring->dev, + rx_ring->rx_bi[0].dma, + i * rx_ring->rx_hdr_len, + rx_ring->rx_hdr_len, + DMA_FROM_DEVICE); if (i40e_rx_is_programming_status(qword)) { i40e_clean_programming_status(rx_ring, rx_desc); I40E_RX_INCREMENT(rx_ring, i); @@ -1518,10 +1564,13 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) rx_bi = &rx_ring->rx_bi[i]; skb = rx_bi->skb; if (likely(!skb)) { - skb = netdev_alloc_skb_ip_align(rx_ring->netdev, - rx_ring->rx_hdr_len); + skb = __netdev_alloc_skb_ip_align(rx_ring->netdev, + rx_ring->rx_hdr_len, + GFP_ATOMIC | + __GFP_NOWARN); if (!skb) { rx_ring->rx_stats.alloc_buff_failed++; + failure = true; break; } @@ -1529,8 +1578,8 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) skb_record_rx_queue(skb, rx_ring->queue_index); /* we are reusing so sync this buffer for CPU use */ dma_sync_single_range_for_cpu(rx_ring->dev, - rx_bi->dma, - 0, + rx_ring->rx_bi[0].dma, + i * rx_ring->rx_hdr_len, rx_ring->rx_hdr_len, DMA_FROM_DEVICE); } @@ -1548,9 +1597,16 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT; - prefetch(rx_bi->page); + /* sync half-page for reading */ + dma_sync_single_range_for_cpu(rx_ring->dev, + rx_bi->page_dma, + rx_bi->page_offset, + PAGE_SIZE / 2, + DMA_FROM_DEVICE); + prefetch(page_address(rx_bi->page) + rx_bi->page_offset); rx_bi->skb = NULL; cleaned_count++; + copysize = 0; if (rx_hbo || rx_sph) { int len; @@ -1561,38 +1617,50 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) memcpy(__skb_put(skb, len), rx_bi->hdr_buf, len); } else if (skb->len == 0) { int len; + unsigned char *va = page_address(rx_bi->page) + + rx_bi->page_offset; - len = (rx_packet_len > skb_headlen(skb) ? 
- skb_headlen(skb) : rx_packet_len); - memcpy(__skb_put(skb, len), - rx_bi->page + rx_bi->page_offset, - len); - rx_bi->page_offset += len; + len = min(rx_packet_len, rx_ring->rx_hdr_len); + memcpy(__skb_put(skb, len), va, len); + copysize = len; rx_packet_len -= len; } - /* Get the rest of the data if this was a header split */ if (rx_packet_len) { - skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, - rx_bi->page, - rx_bi->page_offset, - rx_packet_len); - - skb->len += rx_packet_len; - skb->data_len += rx_packet_len; - skb->truesize += rx_packet_len; - - if ((page_count(rx_bi->page) == 1) && - (page_to_nid(rx_bi->page) == current_node)) - get_page(rx_bi->page); - else + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, + rx_bi->page, + rx_bi->page_offset + copysize, + rx_packet_len, I40E_RXBUFFER_2048); + + /* If the page count is more than 2, then both halves + * of the page are used and we need to free it. Do it + * here instead of in the alloc code. Otherwise one + * of the half-pages might be released between now and + * then, and we wouldn't know which one to use. + * Don't call get_page and free_page since those are + * both expensive atomic operations that just change + * the refcount in opposite directions. Just give the + * page to the stack; he can have our refcount. + */ + if (page_count(rx_bi->page) > 2) { + dma_unmap_page(rx_ring->dev, + rx_bi->page_dma, + PAGE_SIZE, + DMA_FROM_DEVICE); rx_bi->page = NULL; + rx_bi->page_dma = 0; + rx_ring->rx_stats.realloc_count++; + } else { + get_page(rx_bi->page); + /* switch to the other half-page here; the + * allocation code programs the right addr + * into HW. If we haven't used this half-page, + * the address won't be changed, and HW can + * just use it next time through. + */ + rx_bi->page_offset ^= PAGE_SIZE / 2; + } - dma_unmap_page(rx_ring->dev, - rx_bi->page_dma, - PAGE_SIZE / 2, - DMA_FROM_DEVICE); - rx_bi->page_dma = 0; } I40E_RX_INCREMENT(rx_ring, i); @@ -1612,8 +1680,8 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) continue; } - skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc), - i40e_ptype_to_hash(rx_ptype)); + i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype); + if (unlikely(rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK)) { i40e_ptp_rx_hwtstamp(vsi->back, skb, (rx_status & I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >> @@ -1651,7 +1719,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) rx_ring->q_vector->rx.total_packets += total_rx_packets; rx_ring->q_vector->rx.total_bytes += total_rx_bytes; - return total_rx_packets; + return failure ? 
budget : total_rx_packets; } /** @@ -1669,6 +1737,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget) union i40e_rx_desc *rx_desc; u32 rx_error, rx_status; u16 rx_packet_len; + bool failure = false; u8 rx_ptype; u64 qword; u16 i; @@ -1679,7 +1748,9 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget) u16 vlan_tag; /* return some buffers to hardware, one at a time is too slow */ if (cleaned_count >= I40E_RX_BUFFER_WRITE) { - i40e_alloc_rx_buffers_1buf(rx_ring, cleaned_count); + failure = failure || + i40e_alloc_rx_buffers_1buf(rx_ring, + cleaned_count); cleaned_count = 0; } @@ -1741,8 +1812,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget) continue; } - skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc), - i40e_ptype_to_hash(rx_ptype)); + i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype); if (unlikely(rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK)) { i40e_ptp_rx_hwtstamp(vsi->back, skb, (rx_status & I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >> @@ -1779,7 +1849,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget) rx_ring->q_vector->rx.total_packets += total_rx_packets; rx_ring->q_vector->rx.total_bytes += total_rx_bytes; - return total_rx_packets; + return failure ? budget : total_rx_packets; } static u32 i40e_buildreg_itr(const int type, const u16 itr) @@ -1787,7 +1857,9 @@ static u32 i40e_buildreg_itr(const int type, const u16 itr) u32 val; val = I40E_PFINT_DYN_CTLN_INTENA_MASK | - I40E_PFINT_DYN_CTLN_CLEARPBA_MASK | + /* Don't clear PBA because that can cause lost interrupts that + * came in while we were cleaning/polling + */ (type << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) | (itr << I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT); @@ -1902,7 +1974,8 @@ int i40e_napi_poll(struct napi_struct *napi, int budget) * budget and be more aggressive about cleaning up the Tx descriptors. 
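Several hunks here tighten the same NAPI contract: a poll routine that still has work, or that hit an allocation failure, must return the full budget to stay scheduled, and should only re-enable its interrupt after completing with less than budget. A minimal poll skeleton under those rules (clean_my_rings() and reenable_my_irq() are hypothetical helpers):

static int example_poll(struct napi_struct *napi, int budget)
{
        int work = clean_my_rings(napi, budget);        /* hypothetical */

        if (work >= budget)
                return budget;          /* stay in polling mode */

        napi_complete(napi);
        reenable_my_irq(napi);          /* hypothetical */
        return work;
}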
*/ i40e_for_each_ring(ring, q_vector->tx) { - clean_complete &= i40e_clean_tx_irq(ring, vsi->work_limit); + clean_complete = clean_complete && + i40e_clean_tx_irq(ring, vsi->work_limit); arm_wb = arm_wb || ring->arm_wb; ring->arm_wb = false; } @@ -1926,7 +1999,7 @@ int i40e_napi_poll(struct napi_struct *napi, int budget) work_done += cleaned; /* if we didn't clean as many as budgeted, we must be done */ - clean_complete &= (budget_per_ring != cleaned); + clean_complete = clean_complete && (budget_per_ring > cleaned); } /* If work not completed, return budget and polling will return */ @@ -1934,7 +2007,7 @@ int i40e_napi_poll(struct napi_struct *napi, int budget) tx_only: if (arm_wb) { q_vector->tx.ring[0].tx_stats.tx_force_wb++; - i40e_force_wb(vsi, q_vector); + i40e_enable_wb_on_itr(vsi, q_vector); } return budget; } @@ -1947,20 +2020,7 @@ tx_only: if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) { i40e_update_enable_itr(vsi, q_vector); } else { /* Legacy mode */ - struct i40e_hw *hw = &vsi->back->hw; - /* We re-enable the queue 0 cause, but - * don't worry about dynamic_enable - * because we left it on for the other - * possible interrupts during napi - */ - u32 qval = rd32(hw, I40E_QINT_RQCTL(0)) | - I40E_QINT_RQCTL_CAUSE_ENA_MASK; - - wr32(hw, I40E_QINT_RQCTL(0), qval); - qval = rd32(hw, I40E_QINT_TQCTL(0)) | - I40E_QINT_TQCTL_CAUSE_ENA_MASK; - wr32(hw, I40E_QINT_TQCTL(0), qval); - i40e_irq_dynamic_enable_icr0(vsi->back); + i40e_irq_dynamic_enable_icr0(vsi->back, false); } return 0; } @@ -1970,10 +2030,9 @@ tx_only: * @tx_ring: ring to add programming descriptor to * @skb: send buffer * @tx_flags: send tx flags - * @protocol: wire protocol **/ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb, - u32 tx_flags, __be16 protocol) + u32 tx_flags) { struct i40e_filter_program_desc *fdir_desc; struct i40e_pf *pf = tx_ring->vsi->back; @@ -1985,6 +2044,7 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb, struct tcphdr *th; unsigned int hlen; u32 flex_ptype, dtype_cmd; + int l4_proto; u16 i; /* make sure ATR is enabled */ @@ -1998,36 +2058,28 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb, if (!tx_ring->atr_sample_rate) return; + /* Currently only IPv4/IPv6 with TCP is supported */ if (!(tx_flags & (I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6))) return; - if (!(tx_flags & I40E_TX_FLAGS_UDP_TUNNEL)) { - /* snag network header to get L4 type and address */ - hdr.network = skb_network_header(skb); + /* snag network header to get L4 type and address */ + hdr.network = (tx_flags & I40E_TX_FLAGS_UDP_TUNNEL) ? + skb_inner_network_header(skb) : skb_network_header(skb); - /* Currently only IPv4/IPv6 with TCP is supported - * access ihl as u8 to avoid unaligned access on ia64 - */ - if (tx_flags & I40E_TX_FLAGS_IPV4) - hlen = (hdr.network[0] & 0x0F) << 2; - else if (protocol == htons(ETH_P_IPV6)) - hlen = sizeof(struct ipv6hdr); - else - return; + /* Note: tx_flags gets modified to reflect inner protocols in + * tx_enable_csum function if encap is enabled. 
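The reworked ATR path below stops open-coding the IPv6 case and leans on ipv6_find_hdr(), which walks the extension-header chain and returns the protocol it stops at while advancing the offset. Roughly (a sketch, not the driver's code):

#include <net/ipv6.h>

/* Returns true and sets *l4_off to the TCP header offset when the
 * IPv6 extension-header chain ends in TCP.
 */
static bool ipv6_l4_is_tcp(struct sk_buff *skb, unsigned int *l4_off)
{
        *l4_off = skb_network_offset(skb);
        return ipv6_find_hdr(skb, l4_off, IPPROTO_TCP, NULL, NULL) ==
               IPPROTO_TCP;
}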
+ */ + if (tx_flags & I40E_TX_FLAGS_IPV4) { + /* access ihl as u8 to avoid unaligned access on ia64 */ + hlen = (hdr.network[0] & 0x0F) << 2; + l4_proto = hdr.ipv4->protocol; } else { - hdr.network = skb_inner_network_header(skb); - hlen = skb_inner_network_header_len(skb); + hlen = hdr.network - skb->data; + l4_proto = ipv6_find_hdr(skb, &hlen, IPPROTO_TCP, NULL, NULL); + hlen -= hdr.network - skb->data; } - /* Currently only IPv4/IPv6 with TCP is supported - * Note: tx_flags gets modified to reflect inner protocols in - * tx_enable_csum function if encap is enabled. - */ - if ((tx_flags & I40E_TX_FLAGS_IPV4) && - (hdr.ipv4->protocol != IPPROTO_TCP)) - return; - else if ((tx_flags & I40E_TX_FLAGS_IPV6) && - (hdr.ipv6->nexthdr != IPPROTO_TCP)) + if (l4_proto != IPPROTO_TCP) return; th = (struct tcphdr *)(hdr.network + hlen); @@ -2035,7 +2087,8 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb, /* Due to lack of space, no more new filters can be programmed */ if (th->syn && (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) return; - if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) { + if ((pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) && + (!(pf->auto_disable_flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE))) { /* HW ATR eviction will take care of removing filters on FIN * and RST packets. */ @@ -2063,7 +2116,7 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb, flex_ptype = (tx_ring->queue_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) & I40E_TXD_FLTR_QW0_QINDEX_MASK; - flex_ptype |= (protocol == htons(ETH_P_IP)) ? + flex_ptype |= (tx_flags & I40E_TX_FLAGS_IPV4) ? (I40E_FILTER_PCTYPE_NONF_IPV4_TCP << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) : (I40E_FILTER_PCTYPE_NONF_IPV6_TCP << @@ -2097,7 +2150,8 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb, I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) & I40E_TXD_FLTR_QW1_CNTINDEX_MASK; - if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) + if ((pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) && + (!(pf->auto_disable_flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE))) dtype_cmd |= I40E_TXD_FLTR_QW1_ATR_MASK; fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype); @@ -2202,13 +2256,23 @@ out: static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb, u8 *hdr_len, u64 *cd_type_cmd_tso_mss) { - u32 cd_cmd, cd_tso_len, cd_mss; - struct ipv6hdr *ipv6h; - struct tcphdr *tcph; - struct iphdr *iph; - u32 l4len; + u64 cd_cmd, cd_tso_len, cd_mss; + union { + struct iphdr *v4; + struct ipv6hdr *v6; + unsigned char *hdr; + } ip; + union { + struct tcphdr *tcp; + struct udphdr *udp; + unsigned char *hdr; + } l4; + u32 paylen, l4_offset; int err; + if (skb->ip_summed != CHECKSUM_PARTIAL) + return 0; + if (!skb_is_gso(skb)) return 0; @@ -2216,35 +2280,60 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb, if (err < 0) return err; - iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb); - ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb); - - if (iph->version == 4) { - tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb); - iph->tot_len = 0; - iph->check = 0; - tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, - 0, IPPROTO_TCP, 0); - } else if (ipv6h->version == 6) { - tcph = skb->encapsulation ? 
inner_tcp_hdr(skb) : tcp_hdr(skb); - ipv6h->payload_len = 0; - tcph->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, - 0, IPPROTO_TCP, 0); + ip.hdr = skb_network_header(skb); + l4.hdr = skb_transport_header(skb); + + /* initialize outer IP header fields */ + if (ip.v4->version == 4) { + ip.v4->tot_len = 0; + ip.v4->check = 0; + } else { + ip.v6->payload_len = 0; } - l4len = skb->encapsulation ? inner_tcp_hdrlen(skb) : tcp_hdrlen(skb); - *hdr_len = (skb->encapsulation - ? (skb_inner_transport_header(skb) - skb->data) - : skb_transport_offset(skb)) + l4len; + if (skb_shinfo(skb)->gso_type & (SKB_GSO_UDP_TUNNEL | SKB_GSO_GRE | + SKB_GSO_UDP_TUNNEL_CSUM)) { + if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM) { + /* determine offset of outer transport header */ + l4_offset = l4.hdr - skb->data; + + /* remove payload length from outer checksum */ + paylen = (__force u16)l4.udp->check; + paylen += ntohs(1) * (u16)~(skb->len - l4_offset); + l4.udp->check = ~csum_fold((__force __wsum)paylen); + } + + /* reset pointers to inner headers */ + ip.hdr = skb_inner_network_header(skb); + l4.hdr = skb_inner_transport_header(skb); + + /* initialize inner IP header fields */ + if (ip.v4->version == 4) { + ip.v4->tot_len = 0; + ip.v4->check = 0; + } else { + ip.v6->payload_len = 0; + } + } + + /* determine offset of inner transport header */ + l4_offset = l4.hdr - skb->data; + + /* remove payload length from inner checksum */ + paylen = (__force u16)l4.tcp->check; + paylen += ntohs(1) * (u16)~(skb->len - l4_offset); + l4.tcp->check = ~csum_fold((__force __wsum)paylen); + + /* compute length of segmentation header */ + *hdr_len = (l4.tcp->doff * 4) + l4_offset; /* find the field values */ cd_cmd = I40E_TX_CTX_DESC_TSO; cd_tso_len = skb->len - *hdr_len; cd_mss = skb_shinfo(skb)->gso_size; - *cd_type_cmd_tso_mss |= ((u64)cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) | - ((u64)cd_tso_len << - I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) | - ((u64)cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT); + *cd_type_cmd_tso_mss |= (cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) | + (cd_tso_len << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) | + (cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT); return 1; } @@ -2299,129 +2388,154 @@ static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb, * @tx_ring: Tx descriptor ring * @cd_tunneling: ptr to context desc bits **/ -static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, - u32 *td_cmd, u32 *td_offset, - struct i40e_ring *tx_ring, - u32 *cd_tunneling) +static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, + u32 *td_cmd, u32 *td_offset, + struct i40e_ring *tx_ring, + u32 *cd_tunneling) { - struct ipv6hdr *this_ipv6_hdr; - unsigned int this_tcp_hdrlen; - struct iphdr *this_ip_hdr; - u32 network_hdr_len; - u8 l4_hdr = 0; - struct udphdr *oudph; - struct iphdr *oiph; - u32 l4_tunnel = 0; + union { + struct iphdr *v4; + struct ipv6hdr *v6; + unsigned char *hdr; + } ip; + union { + struct tcphdr *tcp; + struct udphdr *udp; + unsigned char *hdr; + } l4; + unsigned char *exthdr; + u32 offset, cmd = 0, tunnel = 0; + __be16 frag_off; + u8 l4_proto = 0; + + if (skb->ip_summed != CHECKSUM_PARTIAL) + return 0; + + ip.hdr = skb_network_header(skb); + l4.hdr = skb_transport_header(skb); + + /* compute outer L2 header size */ + offset = ((ip.hdr - skb->data) / 2) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT; if (skb->encapsulation) { - switch (ip_hdr(skb)->protocol) { + /* define outer network header type */ + if (*tx_flags & I40E_TX_FLAGS_IPV4) { + tunnel |= (*tx_flags & I40E_TX_FLAGS_TSO) ? 
+ I40E_TX_CTX_EXT_IP_IPV4 : + I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM; + + l4_proto = ip.v4->protocol; + } else if (*tx_flags & I40E_TX_FLAGS_IPV6) { + tunnel |= I40E_TX_CTX_EXT_IP_IPV6; + + exthdr = ip.hdr + sizeof(*ip.v6); + l4_proto = ip.v6->nexthdr; + if (l4.hdr != exthdr) + ipv6_skip_exthdr(skb, exthdr - skb->data, + &l4_proto, &frag_off); + } + + /* compute outer L3 header size */ + tunnel |= ((l4.hdr - ip.hdr) / 4) << + I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT; + + /* switch IP header pointer from outer to inner header */ + ip.hdr = skb_inner_network_header(skb); + + /* define outer transport */ + switch (l4_proto) { case IPPROTO_UDP: - oudph = udp_hdr(skb); - oiph = ip_hdr(skb); - l4_tunnel = I40E_TXD_CTX_UDP_TUNNELING; + tunnel |= I40E_TXD_CTX_UDP_TUNNELING; *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL; break; case IPPROTO_GRE: - l4_tunnel = I40E_TXD_CTX_GRE_TUNNELING; + tunnel |= I40E_TXD_CTX_GRE_TUNNELING; + *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL; break; default: - return; - } - network_hdr_len = skb_inner_network_header_len(skb); - this_ip_hdr = inner_ip_hdr(skb); - this_ipv6_hdr = inner_ipv6_hdr(skb); - this_tcp_hdrlen = inner_tcp_hdrlen(skb); - - if (*tx_flags & I40E_TX_FLAGS_IPV4) { - if (*tx_flags & I40E_TX_FLAGS_TSO) { - *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4; - ip_hdr(skb)->check = 0; - } else { - *cd_tunneling |= - I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM; - } - } else if (*tx_flags & I40E_TX_FLAGS_IPV6) { - *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6; if (*tx_flags & I40E_TX_FLAGS_TSO) - ip_hdr(skb)->check = 0; + return -1; + + skb_checksum_help(skb); + return 0; } - /* Now set the ctx descriptor fields */ - *cd_tunneling |= (skb_network_header_len(skb) >> 2) << - I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT | - l4_tunnel | - ((skb_inner_network_offset(skb) - - skb_transport_offset(skb)) >> 1) << - I40E_TXD_CTX_QW0_NATLEN_SHIFT; - if (this_ip_hdr->version == 6) { - *tx_flags &= ~I40E_TX_FLAGS_IPV4; + /* compute tunnel header size */ + tunnel |= ((ip.hdr - l4.hdr) / 2) << + I40E_TXD_CTX_QW0_NATLEN_SHIFT; + + /* indicate if we need to offload outer UDP header */ + if ((*tx_flags & I40E_TX_FLAGS_TSO) && + (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) + tunnel |= I40E_TXD_CTX_QW0_L4T_CS_MASK; + + /* record tunnel offload values */ + *cd_tunneling |= tunnel; + + /* switch L4 header pointer from outer to inner */ + l4.hdr = skb_inner_transport_header(skb); + l4_proto = 0; + + /* reset type as we transition from outer to inner headers */ + *tx_flags &= ~(I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6); + if (ip.v4->version == 4) + *tx_flags |= I40E_TX_FLAGS_IPV4; + if (ip.v6->version == 6) *tx_flags |= I40E_TX_FLAGS_IPV6; - } - if ((tx_ring->flags & I40E_TXR_FLAGS_OUTER_UDP_CSUM) && - (l4_tunnel == I40E_TXD_CTX_UDP_TUNNELING) && - (*cd_tunneling & I40E_TXD_CTX_QW0_EXT_IP_MASK)) { - oudph->check = ~csum_tcpudp_magic(oiph->saddr, - oiph->daddr, - (skb->len - skb_transport_offset(skb)), - IPPROTO_UDP, 0); - *cd_tunneling |= I40E_TXD_CTX_QW0_L4T_CS_MASK; - } - } else { - network_hdr_len = skb_network_header_len(skb); - this_ip_hdr = ip_hdr(skb); - this_ipv6_hdr = ipv6_hdr(skb); - this_tcp_hdrlen = tcp_hdrlen(skb); } /* Enable IP checksum offloads */ if (*tx_flags & I40E_TX_FLAGS_IPV4) { - l4_hdr = this_ip_hdr->protocol; + l4_proto = ip.v4->protocol; /* the stack computes the IP header already, the only time we * need the hardware to recompute it is in the case of TSO. 
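The rewritten checksum path walks headers through a union of typed and raw pointers, so one value can be advanced byte-wise and then dereferenced as either IP version without casts, and it hands IPv6 extension headers to ipv6_skip_exthdr() instead of trusting nexthdr directly. The idiom in isolation (example_l4_proto is illustrative, not a driver function, and deliberately shows the naive nexthdr read that the patch avoids for IPv6):

	#include <linux/ip.h>
	#include <linux/ipv6.h>
	#include <linux/skbuff.h>

	/* One pointer, three views: advance as bytes, read as v4 or v6. */
	union ip_ptr {
		struct iphdr *v4;
		struct ipv6hdr *v6;
		unsigned char *hdr;
	};

	static u8 example_l4_proto(struct sk_buff *skb)
	{
		union ip_ptr ip = { .hdr = skb_network_header(skb) };

		/* For IPv6 this is only the first next-header value; the
		 * patch calls ipv6_skip_exthdr() to walk extension headers.
		 */
		return (ip.v4->version == 4) ? ip.v4->protocol : ip.v6->nexthdr;
	}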
*/ - if (*tx_flags & I40E_TX_FLAGS_TSO) { - *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM; - this_ip_hdr->check = 0; - } else { - *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4; - } - /* Now set the td_offset for IP header length */ - *td_offset = (network_hdr_len >> 2) << - I40E_TX_DESC_LENGTH_IPLEN_SHIFT; + cmd |= (*tx_flags & I40E_TX_FLAGS_TSO) ? + I40E_TX_DESC_CMD_IIPT_IPV4_CSUM : + I40E_TX_DESC_CMD_IIPT_IPV4; } else if (*tx_flags & I40E_TX_FLAGS_IPV6) { - l4_hdr = this_ipv6_hdr->nexthdr; - *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV6; - /* Now set the td_offset for IP header length */ - *td_offset = (network_hdr_len >> 2) << - I40E_TX_DESC_LENGTH_IPLEN_SHIFT; + cmd |= I40E_TX_DESC_CMD_IIPT_IPV6; + + exthdr = ip.hdr + sizeof(*ip.v6); + l4_proto = ip.v6->nexthdr; + if (l4.hdr != exthdr) + ipv6_skip_exthdr(skb, exthdr - skb->data, + &l4_proto, &frag_off); } - /* words in MACLEN + dwords in IPLEN + dwords in L4Len */ - *td_offset |= (skb_network_offset(skb) >> 1) << - I40E_TX_DESC_LENGTH_MACLEN_SHIFT; + + /* compute inner L3 header size */ + offset |= ((l4.hdr - ip.hdr) / 4) << I40E_TX_DESC_LENGTH_IPLEN_SHIFT; /* Enable L4 checksum offloads */ - switch (l4_hdr) { + switch (l4_proto) { case IPPROTO_TCP: /* enable checksum offloads */ - *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP; - *td_offset |= (this_tcp_hdrlen >> 2) << - I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; + cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP; + offset |= l4.tcp->doff << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; break; case IPPROTO_SCTP: /* enable SCTP checksum offload */ - *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP; - *td_offset |= (sizeof(struct sctphdr) >> 2) << - I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; + cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP; + offset |= (sizeof(struct sctphdr) >> 2) << + I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; break; case IPPROTO_UDP: /* enable UDP checksum offload */ - *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP; - *td_offset |= (sizeof(struct udphdr) >> 2) << - I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; + cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP; + offset |= (sizeof(struct udphdr) >> 2) << + I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; break; default: - break; + if (*tx_flags & I40E_TX_FLAGS_TSO) + return -1; + skb_checksum_help(skb); + return 0; } + + *td_cmd |= cmd; + *td_offset |= offset; + + return 1; } /** @@ -2858,12 +2972,10 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, td_cmd |= I40E_TX_DESC_CMD_ICRC; /* Always offload the checksum, since it's in the data descriptor */ - if (skb->ip_summed == CHECKSUM_PARTIAL) { - tx_flags |= I40E_TX_FLAGS_CSUM; - - i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset, - tx_ring, &cd_tunneling); - } + tso = i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset, + tx_ring, &cd_tunneling); + if (tso < 0) + goto out_drop; i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss, cd_tunneling, cd_l2tag2); @@ -2872,7 +2984,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, * * NOTE: this must always be directly before the data descriptor. 
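Returning an int from i40e_tx_enable_csum(), which previously returned void, gives the transmit path a three-way contract: a negative value means the frame asked for an offload the hardware cannot perform on a TSO frame and must be dropped, zero means the checksum was already punted to software via skb_checksum_help(), and a positive value means the descriptor fields were programmed for hardware offload. A sketch of the caller side, with hypothetical example_* names (the driver's own caller, shown further down in this patch, does the same thing with goto out_drop):

	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	static int example_enable_csum(struct sk_buff *skb);
	static netdev_tx_t example_map_and_send(struct sk_buff *skb);

	/* <0: un-offloadable TSO combination -> drop
	 *  0: checksum already handled in software
	 * >0: hardware offload fields were filled in
	 */
	static netdev_tx_t example_xmit(struct sk_buff *skb)
	{
		int ret = example_enable_csum(skb);

		if (ret < 0) {
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}

		return example_map_and_send(skb);
	}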
*/ - i40e_atr(tx_ring, skb, tx_flags, protocol); + i40e_atr(tx_ring, skb, tx_flags); i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len, td_cmd, td_offset); diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h index 3f081e25e097..fde5f42524fb 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2014 Intel Corporation. + * Copyright(c) 2013 - 2016 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -153,7 +153,6 @@ enum i40e_dyn_idx_t { #define DESC_NEEDED (MAX_SKB_FRAGS + 4) #define I40E_MIN_DESC_PENDING 4 -#define I40E_TX_FLAGS_CSUM BIT(0) #define I40E_TX_FLAGS_HW_VLAN BIT(1) #define I40E_TX_FLAGS_SW_VLAN BIT(2) #define I40E_TX_FLAGS_TSO BIT(3) @@ -203,12 +202,15 @@ struct i40e_tx_queue_stats { u64 tx_done_old; u64 tx_linearize; u64 tx_force_wb; + u64 tx_lost_interrupt; }; struct i40e_rx_queue_stats { u64 non_eop_descs; u64 alloc_page_failed; u64 alloc_buff_failed; + u64 page_reuse_count; + u64 realloc_count; }; enum i40e_ring_state_t { @@ -254,7 +256,6 @@ struct i40e_ring { #define I40E_RX_DTYPE_NO_SPLIT 0 #define I40E_RX_DTYPE_HEADER_SPLIT 1 #define I40E_RX_DTYPE_SPLIT_ALWAYS 2 - u8 hsplit; #define I40E_RX_SPLIT_L2 0x1 #define I40E_RX_SPLIT_IP 0x2 #define I40E_RX_SPLIT_TCP_UDP 0x4 @@ -275,7 +276,6 @@ struct i40e_ring { u16 flags; #define I40E_TXR_FLAGS_WB_ON_ITR BIT(0) -#define I40E_TXR_FLAGS_OUTER_UDP_CSUM BIT(1) #define I40E_TXR_FLAGS_LAST_XMIT_MORE_SET BIT(2) /* stats structs */ @@ -316,8 +316,8 @@ struct i40e_ring_container { #define i40e_for_each_ring(pos, head) \ for (pos = (head).ring; pos != NULL; pos = pos->next) -void i40e_alloc_rx_buffers_ps(struct i40e_ring *rxr, u16 cleaned_count); -void i40e_alloc_rx_buffers_1buf(struct i40e_ring *rxr, u16 cleaned_count); +bool i40e_alloc_rx_buffers_ps(struct i40e_ring *rxr, u16 cleaned_count); +bool i40e_alloc_rx_buffers_1buf(struct i40e_ring *rxr, u16 cleaned_count); void i40e_alloc_rx_headers(struct i40e_ring *rxr); netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev); void i40e_clean_tx_ring(struct i40e_ring *tx_ring); @@ -337,7 +337,7 @@ int i40e_tx_prepare_vlan_flags(struct sk_buff *skb, struct i40e_ring *tx_ring, u32 *flags); #endif void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector); -u32 i40e_get_tx_pending(struct i40e_ring *ring); +u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw); /** * i40e_get_head - Retrieve head from head writeback diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h index dd2da356d9a1..b59a021b7a69 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_type.h +++ b/drivers/net/ethernet/intel/i40e/i40e_type.h @@ -1098,6 +1098,10 @@ enum i40e_filter_program_desc_pcmd { I40E_TXD_FLTR_QW1_CMD_SHIFT) +#define I40E_TXD_FLTR_QW1_ATR_SHIFT (0xEULL + \ + I40E_TXD_FLTR_QW1_CMD_SHIFT) +#define I40E_TXD_FLTR_QW1_ATR_MASK BIT_ULL(I40E_TXD_FLTR_QW1_ATR_SHIFT) + #define I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT 20 #define I40E_TXD_FLTR_QW1_CNTINDEX_MASK (0x1FFUL << \ I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index aa58a498c239..5dcd19869a41 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2015 Intel Corporation. + * Copyright(c) 2013 - 2016 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -461,7 +461,7 @@ static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id, rx_ctx.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT; /* set splitalways mode 10b */ - rx_ctx.dtype = 0x2; + rx_ctx.dtype = I40E_RX_DTYPE_HEADER_SPLIT; } /* databuffer length validation */ @@ -980,7 +980,7 @@ err_alloc: i40e_free_vfs(pf); err_iov: /* Re-enable interrupt 0. */ - i40e_irq_dynamic_enable_icr0(pf); + i40e_irq_dynamic_enable_icr0(pf, false); return ret; } @@ -1213,9 +1213,21 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg) vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG; } + if (pf->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) { + if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2) + vfres->vf_offload_flags |= + I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2; + } + if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING) vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING; + if (pf->flags & I40E_FLAG_WB_ON_ITR_CAPABLE) { + if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) + vfres->vf_offload_flags |= + I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR; + } + vfres->num_vsis = num_vsis; vfres->num_queue_pairs = vf->num_queue_pairs; vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf; @@ -2025,7 +2037,11 @@ int i40e_vc_process_vflr_event(struct i40e_pf *pf) if (!test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state)) return 0; - /* re-enable vflr interrupt cause */ + /* Re-enable the VFLR interrupt cause here, before looking for which + * VF got reset. Otherwise, if another VF gets a reset while the + * first one is being processed, that interrupt will be lost, and + * that VF will be stuck in reset forever. + */ reg = rd32(hw, I40E_PFINT_ICR0_ENA); reg |= I40E_PFINT_ICR0_ENA_VFLR_MASK; wr32(hw, I40E_PFINT_ICR0_ENA, reg); @@ -2078,15 +2094,15 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) vf = &(pf->vf[vf_id]); vsi = pf->vsi[vf->lan_vsi_idx]; if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) { - dev_err(&pf->pdev->dev, - "Uninitialized VF %d\n", vf_id); - ret = -EINVAL; + dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n", + vf_id); + ret = -EAGAIN; goto error_param; } - if (!is_valid_ether_addr(mac)) { + if (is_multicast_ether_addr(mac)) { dev_err(&pf->pdev->dev, - "Invalid VF ethernet address\n"); + "Invalid Ethernet address %pM for VF %d\n", mac, vf_id); ret = -EINVAL; goto error_param; } @@ -2097,9 +2113,10 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) spin_lock_bh(&vsi->mac_filter_list_lock); /* delete the temporary mac address */ - i40e_del_filter(vsi, vf->default_lan_addr.addr, - vf->port_vlan_id ? vf->port_vlan_id : -1, - true, false); + if (!is_zero_ether_addr(vf->default_lan_addr.addr)) + i40e_del_filter(vsi, vf->default_lan_addr.addr, + vf->port_vlan_id ? vf->port_vlan_id : -1, + true, false); /* Delete all the filters for this VSI - we're going to kill it * anyway. 
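Two deliberate relaxations sit in the i40e_ndo_set_vf_mac() hunk above. Returning -EAGAIN instead of -EINVAL for a VF that is still in reset marks the condition as transient and retryable rather than as a bad argument. And the address check moves from is_valid_ether_addr() to is_multicast_ether_addr(): is_valid_ether_addr() also rejects the all-zero address, but writing 00:00:00:00:00:00 is the conventional way to clear an administratively set VF MAC, which is presumably why the same hunk now guards the stale-filter delete with !is_zero_ether_addr(). A sketch of the resulting policy (example_check_vf_mac is illustrative, not a driver function):

	#include <linux/etherdevice.h>

	static int example_check_vf_mac(const u8 *mac)
	{
		/* multicast (and therefore broadcast) can never be a
		 * unicast station address
		 */
		if (is_multicast_ether_addr(mac))
			return -EINVAL;

		/* the all-zero address is accepted; it clears the MAC */
		return 0;
	}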
@@ -2162,8 +2179,9 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, vf = &(pf->vf[vf_id]); vsi = pf->vsi[vf->lan_vsi_idx]; if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) { - dev_err(&pf->pdev->dev, "Uninitialized VF %d\n", vf_id); - ret = -EINVAL; + dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n", + vf_id); + ret = -EAGAIN; goto error_pvid; } @@ -2282,8 +2300,9 @@ int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate, vf = &(pf->vf[vf_id]); vsi = pf->vsi[vf->lan_vsi_idx]; if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) { - dev_err(&pf->pdev->dev, "Uninitialized VF %d.\n", vf_id); - ret = -EINVAL; + dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n", + vf_id); + ret = -EAGAIN; goto error; } @@ -2291,6 +2310,9 @@ int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate, case I40E_LINK_SPEED_40GB: speed = 40000; break; + case I40E_LINK_SPEED_20GB: + speed = 20000; + break; case I40E_LINK_SPEED_10GB: speed = 10000; break; @@ -2356,8 +2378,9 @@ int i40e_ndo_get_vf_config(struct net_device *netdev, /* first vsi is always the LAN vsi */ vsi = pf->vsi[vf->lan_vsi_idx]; if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) { - dev_err(&pf->pdev->dev, "Uninitialized VF %d\n", vf_id); - ret = -EINVAL; + dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n", + vf_id); + ret = -EAGAIN; goto error_param; } @@ -2472,6 +2495,12 @@ int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable) } vf = &(pf->vf[vf_id]); + if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) { + dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n", + vf_id); + ret = -EAGAIN; + goto out; + } if (enable == vf->spoofchk) goto out; diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h index da44995def42..e74642a0c42e 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h @@ -91,8 +91,8 @@ struct i40e_vf { * When assigned, these will be non-zero, because VSI 0 is always * the main LAN VSI for the PF. */ - u8 lan_vsi_idx; /* index into PF struct */ - u8 lan_vsi_id; /* ID as used by firmware */ + u16 lan_vsi_idx; /* index into PF struct */ + u16 lan_vsi_id; /* ID as used by firmware */ u8 num_queue_pairs; /* num of qps assigned to VF vsis */ u64 num_mdd_events; /* num of mdd events detected */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c index 3f65e39b3fe4..44f7ed7583dd 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver - * Copyright(c) 2013 - 2014 Intel Corporation. + * Copyright(c) 2013 - 2016 Intel Corporation. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -887,6 +887,9 @@ i40e_status i40evf_clean_arq_element(struct i40e_hw *hw, u16 flags; u16 ntu; + /* pre-clean the event info */ + memset(&e->desc, 0, sizeof(e->desc)); + /* take the lock before we start messing with the ring */ mutex_lock(&hw->aq.arq_mutex); diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h index f5b2b369dc7c..815e481ccd9c 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver - * Copyright(c) 2013 - 2014 Intel Corporation. + * Copyright(c) 2013 - 2016 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -34,7 +34,7 @@ */ #define I40E_FW_API_VERSION_MAJOR 0x0001 -#define I40E_FW_API_VERSION_MINOR 0x0004 +#define I40E_FW_API_VERSION_MINOR 0x0005 struct i40e_aq_desc { __le16 flags; @@ -145,6 +145,7 @@ enum i40e_admin_queue_opc { i40e_aqc_opc_remove_statistics = 0x0202, i40e_aqc_opc_set_port_parameters = 0x0203, i40e_aqc_opc_get_switch_resource_alloc = 0x0204, + i40e_aqc_opc_set_switch_config = 0x0205, i40e_aqc_opc_add_vsi = 0x0210, i40e_aqc_opc_update_vsi_parameters = 0x0211, @@ -220,6 +221,7 @@ enum i40e_admin_queue_opc { i40e_aqc_opc_get_phy_wol_caps = 0x0621, i40e_aqc_opc_set_phy_debug = 0x0622, i40e_aqc_opc_upload_ext_phy_fm = 0x0625, + i40e_aqc_opc_run_phy_activity = 0x0626, /* NVM commands */ i40e_aqc_opc_nvm_read = 0x0701, @@ -228,6 +230,7 @@ enum i40e_admin_queue_opc { i40e_aqc_opc_nvm_config_read = 0x0704, i40e_aqc_opc_nvm_config_write = 0x0705, i40e_aqc_opc_oem_post_update = 0x0720, + i40e_aqc_opc_thermal_sensor = 0x0721, /* virtualization commands */ i40e_aqc_opc_send_msg_to_pf = 0x0801, @@ -399,6 +402,7 @@ struct i40e_aqc_list_capabilities_element_resp { #define I40E_AQ_CAP_ID_OS2BMC_CAP 0x0004 #define I40E_AQ_CAP_ID_FUNCTIONS_VALID 0x0005 #define I40E_AQ_CAP_ID_ALTERNATE_RAM 0x0006 +#define I40E_AQ_CAP_ID_WOL_AND_PROXY 0x0008 #define I40E_AQ_CAP_ID_SRIOV 0x0012 #define I40E_AQ_CAP_ID_VF 0x0013 #define I40E_AQ_CAP_ID_VMDQ 0x0014 @@ -419,6 +423,7 @@ struct i40e_aqc_list_capabilities_element_resp { #define I40E_AQ_CAP_ID_LED 0x0061 #define I40E_AQ_CAP_ID_SDP 0x0062 #define I40E_AQ_CAP_ID_MDIO 0x0063 +#define I40E_AQ_CAP_ID_WSR_PROT 0x0064 #define I40E_AQ_CAP_ID_FLEX10 0x00F1 #define I40E_AQ_CAP_ID_CEM 0x00F2 @@ -677,6 +682,17 @@ struct i40e_aqc_switch_resource_alloc_element_resp { I40E_CHECK_STRUCT_LEN(0x10, i40e_aqc_switch_resource_alloc_element_resp); +/* Set Switch Configuration (direct 0x0205) */ +struct i40e_aqc_set_switch_config { + __le16 flags; +#define I40E_AQ_SET_SWITCH_CFG_PROMISC 0x0001 +#define I40E_AQ_SET_SWITCH_CFG_L2_FILTER 0x0002 + __le16 valid_flags; + u8 reserved[12]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_set_switch_config); + /* Add VSI (indirect 0x0210) * this indirect command uses struct i40e_aqc_vsi_properties_data * as the indirect buffer (128 bytes) @@ -903,7 +919,8 @@ struct i40e_aqc_add_veb { I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT) #define I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT 0x2 #define I40E_AQC_ADD_VEB_PORT_TYPE_DATA 0x4 -#define I40E_AQC_ADD_VEB_ENABLE_L2_FILTER 0x8 +#define 
I40E_AQC_ADD_VEB_ENABLE_L2_FILTER 0x8 +#define I40E_AQC_ADD_VEB_ENABLE_L2_FILTER 0x8 /* deprecated */ +#define I40E_AQC_ADD_VEB_ENABLE_DISABLE_STATS 0x10 u8 enable_tcs; u8 reserved[9]; }; @@ -970,6 +987,7 @@ struct i40e_aqc_add_macvlan_element_data { #define I40E_AQC_MACVLAN_ADD_HASH_MATCH 0x0002 #define I40E_AQC_MACVLAN_ADD_IGNORE_VLAN 0x0004 #define I40E_AQC_MACVLAN_ADD_TO_QUEUE 0x0008 +#define I40E_AQC_MACVLAN_ADD_USE_SHARED_MAC 0x0010 __le16 queue_number; #define I40E_AQC_MACVLAN_CMD_QUEUE_SHIFT 0 #define I40E_AQC_MACVLAN_CMD_QUEUE_MASK (0x7FF << \ @@ -1066,6 +1084,7 @@ struct i40e_aqc_set_vsi_promiscuous_modes { #define I40E_AQC_SET_VSI_PROMISC_BROADCAST 0x04 #define I40E_AQC_SET_VSI_DEFAULT 0x08 #define I40E_AQC_SET_VSI_PROMISC_VLAN 0x10 +#define I40E_AQC_SET_VSI_PROMISC_TX 0x8000 __le16 seid; #define I40E_AQC_VSI_PROM_CMD_SEID_MASK 0x3FF __le16 vlan_tag; @@ -1254,10 +1273,16 @@ struct i40e_aqc_add_remove_cloud_filters_element_data { #define I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT 9 #define I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK 0x1E00 -#define I40E_AQC_ADD_CLOUD_TNL_TYPE_XVLAN 0 +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN 0 #define I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC 1 -#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NGE 2 +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE 2 #define I40E_AQC_ADD_CLOUD_TNL_TYPE_IP 3 +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_RESERVED 4 +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN_GPE 5 + +#define I40E_AQC_ADD_CLOUD_FLAGS_SHARED_OUTER_MAC 0x2000 +#define I40E_AQC_ADD_CLOUD_FLAGS_SHARED_INNER_MAC 0x4000 +#define I40E_AQC_ADD_CLOUD_FLAGS_SHARED_OUTER_IP 0x8000 __le32 tenant_id; u8 reserved[4]; @@ -1752,7 +1777,12 @@ struct i40e_aqc_get_link_status { u8 config; #define I40E_AQ_CONFIG_CRC_ENA 0x04 #define I40E_AQ_CONFIG_PACING_MASK 0x78 - u8 reserved[5]; + u8 external_power_ability; +#define I40E_AQ_LINK_POWER_CLASS_1 0x00 +#define I40E_AQ_LINK_POWER_CLASS_2 0x01 +#define I40E_AQ_LINK_POWER_CLASS_3 0x02 +#define I40E_AQ_LINK_POWER_CLASS_4 0x03 + u8 reserved[4]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_get_link_status); @@ -1820,6 +1850,18 @@ enum i40e_aq_phy_reg_type { I40E_AQC_PHY_REG_EXERNAL_MODULE = 0x3 }; +/* Run PHY Activity (0x0626) */ +struct i40e_aqc_run_phy_activity { + __le16 activity_id; + u8 flags; + u8 reserved1; + __le32 control; + __le32 data; + u8 reserved2[4]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_run_phy_activity); + /* NVM Read command (indirect 0x0701) * NVM Erase commands (direct 0x0702) * NVM Update commands (indirect 0x0703) @@ -1909,6 +1951,22 @@ struct i40e_aqc_nvm_oem_post_update_buffer { I40E_CHECK_STRUCT_LEN(0x28, i40e_aqc_nvm_oem_post_update_buffer); +/* Thermal Sensor (indirect 0x0721) + * read or set thermal sensor configs and values + * takes a sensor and command specific data buffer, not detailed here + */ +struct i40e_aqc_thermal_sensor { + u8 sensor_action; +#define I40E_AQ_THERMAL_SENSOR_READ_CONFIG 0 +#define I40E_AQ_THERMAL_SENSOR_SET_CONFIG 1 +#define I40E_AQ_THERMAL_SENSOR_READ_TEMP 2 + u8 reserved[7]; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_thermal_sensor); + /* Send to PF command (indirect 0x0801) id is only used by PF * Send to VF command (indirect 0x0802) id is only used by PF * Send to Peer PF command (indirect 0x0803) @@ -2083,6 +2141,7 @@ struct i40e_aqc_add_udp_tunnel { #define I40E_AQC_TUNNEL_TYPE_VXLAN 0x00 #define I40E_AQC_TUNNEL_TYPE_NGE 0x01 #define I40E_AQC_TUNNEL_TYPE_TEREDO 0x10 +#define I40E_AQC_TUNNEL_TYPE_VXLAN_GPE 0x11 u8 reserved1[10]; }; diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c
index 72b1942a94aa..938783e0baac 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_common.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c @@ -44,7 +44,6 @@ i40e_status i40e_set_mac_type(struct i40e_hw *hw) switch (hw->device_id) { case I40E_DEV_ID_SFP_XL710: case I40E_DEV_ID_QEMU: - case I40E_DEV_ID_KX_A: case I40E_DEV_ID_KX_B: case I40E_DEV_ID_KX_C: case I40E_DEV_ID_QSFP_A: diff --git a/drivers/net/ethernet/intel/i40evf/i40e_devids.h b/drivers/net/ethernet/intel/i40evf/i40e_devids.h index e6a39c9862e8..ca8b58c3d1f5 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_devids.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_devids.h @@ -30,7 +30,6 @@ /* Device IDs */ #define I40E_DEV_ID_SFP_XL710 0x1572 #define I40E_DEV_ID_QEMU 0x1574 -#define I40E_DEV_ID_KX_A 0x157F #define I40E_DEV_ID_KX_B 0x1580 #define I40E_DEV_ID_KX_C 0x1581 #define I40E_DEV_ID_QSFP_A 0x1583 diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index 4ca40651a228..6d66fcdc6122 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver - * Copyright(c) 2013 - 2014 Intel Corporation. + * Copyright(c) 2013 - 2016 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -129,15 +129,19 @@ void i40evf_free_tx_resources(struct i40e_ring *tx_ring) /** * i40evf_get_tx_pending - how many Tx descriptors not processed * @tx_ring: the ring of descriptors + * @in_sw: is tx_pending being checked in SW or HW * * Since there is no access to the ring head register * in XL710, we need to use our local copies **/ -u32 i40evf_get_tx_pending(struct i40e_ring *ring) +u32 i40evf_get_tx_pending(struct i40e_ring *ring, bool in_sw) { u32 head, tail; - head = i40e_get_head(ring); + if (!in_sw) + head = i40e_get_head(ring); + else + head = ring->next_to_clean; tail = readl(ring->tail); if (head != tail) @@ -252,6 +256,22 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget) tx_ring->q_vector->tx.total_bytes += total_bytes; tx_ring->q_vector->tx.total_packets += total_packets; + if (tx_ring->flags & I40E_TXR_FLAGS_WB_ON_ITR) { + unsigned int j = 0; + /* check to see if there are < 4 descriptors + * waiting to be written back, then kick the hardware to force + * them to be written back in case we stay in NAPI. + * In this mode on X722 we do not enable Interrupt. 
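The arm_wb test just below is dense; (j / (WB_STRIDE + 1)) == 0 combined with j > 0 is simply 0 < j <= WB_STRIDE, i.e. the queue is non-empty but nearly drained. Because completed Tx descriptors are written back in batches, a few stragglers can sit below the write-back threshold indefinitely if no further traffic arrives, so the ring arms a write-back to flush them on the next ITR tick without raising an interrupt. The same condition, restated (example_maybe_arm_wb is an illustrative wrapper around the patch's own names):

	static void example_maybe_arm_wb(struct i40e_ring *tx_ring, int budget)
	{
		/* outstanding descriptors, from the head write-back copy */
		unsigned int pending = i40evf_get_tx_pending(tx_ring, false);

		if (budget && pending > 0 && pending <= WB_STRIDE &&
		    !test_bit(__I40E_DOWN, &tx_ring->vsi->state) &&
		    I40E_DESC_UNUSED(tx_ring) != tx_ring->count)
			tx_ring->arm_wb = true;
	}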
+ */ + j = i40evf_get_tx_pending(tx_ring, false); + + if (budget && + ((j / (WB_STRIDE + 1)) == 0) && (j > 0) && + !test_bit(__I40E_DOWN, &tx_ring->vsi->state) && + (I40E_DESC_UNUSED(tx_ring) != tx_ring->count)) + tx_ring->arm_wb = true; + } + netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index), total_packets, total_bytes); @@ -276,39 +296,49 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget) } /** - * i40evf_force_wb -Arm hardware to do a wb on noncache aligned descriptors + * i40evf_enable_wb_on_itr - Arm hardware to do a wb, interrupts are not enabled * @vsi: the VSI we care about - * @q_vector: the vector on which to force writeback + * @q_vector: the vector on which to enable writeback * **/ -static void i40evf_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector) +static void i40e_enable_wb_on_itr(struct i40e_vsi *vsi, + struct i40e_q_vector *q_vector) { u16 flags = q_vector->tx.ring[0].flags; + u32 val; - if (flags & I40E_TXR_FLAGS_WB_ON_ITR) { - u32 val; + if (!(flags & I40E_TXR_FLAGS_WB_ON_ITR)) + return; - if (q_vector->arm_wb_state) - return; + if (q_vector->arm_wb_state) + return; - val = I40E_VFINT_DYN_CTLN1_WB_ON_ITR_MASK; + val = I40E_VFINT_DYN_CTLN1_WB_ON_ITR_MASK | + I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK; /* set noitr */ - wr32(&vsi->back->hw, - I40E_VFINT_DYN_CTLN1(q_vector->v_idx + - vsi->base_vector - 1), - val); - q_vector->arm_wb_state = true; - } else { - u32 val = I40E_VFINT_DYN_CTLN1_INTENA_MASK | - I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK | /* set noitr */ - I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK | - I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK; - /* allow 00 to be written to the index */ - - wr32(&vsi->back->hw, - I40E_VFINT_DYN_CTLN1(q_vector->v_idx + - vsi->base_vector - 1), val); - } + wr32(&vsi->back->hw, + I40E_VFINT_DYN_CTLN1(q_vector->v_idx + + vsi->base_vector - 1), val); + q_vector->arm_wb_state = true; +} + +/** + * i40evf_force_wb - Issue SW Interrupt so HW does a wb + * @vsi: the VSI we care about + * @q_vector: the vector on which to force writeback + * + **/ +void i40evf_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector) +{ + u32 val = I40E_VFINT_DYN_CTLN1_INTENA_MASK | + I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK | /* set noitr */ + I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK | + I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK + /* allow 00 to be written to the index */; + + wr32(&vsi->back->hw, + I40E_VFINT_DYN_CTLN1(q_vector->v_idx + vsi->base_vector - 1), + val); } /** @@ -506,7 +536,7 @@ void i40evf_clean_rx_ring(struct i40e_ring *rx_ring) if (rx_bi->page_dma) { dma_unmap_page(dev, rx_bi->page_dma, - PAGE_SIZE / 2, + PAGE_SIZE, DMA_FROM_DEVICE); rx_bi->page_dma = 0; } @@ -641,16 +671,19 @@ static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val) * i40evf_alloc_rx_buffers_ps - Replace used receive buffers; packet split * @rx_ring: ring to place buffers on * @cleaned_count: number of buffers to replace + * + * Returns true if any errors on allocation **/ -void i40evf_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count) +bool i40evf_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count) { u16 i = rx_ring->next_to_use; union i40e_rx_desc *rx_desc; struct i40e_rx_buffer *bi; + const int current_node = numa_node_id(); /* do nothing if no valid netdev defined */ if (!rx_ring->netdev || !cleaned_count) - return; + return false; while (cleaned_count--) { rx_desc = I40E_RX_DESC(rx_ring, i); @@ -658,56 +691,79 @@ void i40evf_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, 
u16 cleaned_count) if (bi->skb) /* desc is in use */ goto no_buffers; + + /* If we've been moved to a different NUMA node, release the + * page so we can get a new one on the current node. + */ + if (bi->page && page_to_nid(bi->page) != current_node) { + dma_unmap_page(rx_ring->dev, + bi->page_dma, + PAGE_SIZE, + DMA_FROM_DEVICE); + __free_page(bi->page); + bi->page = NULL; + bi->page_dma = 0; + rx_ring->rx_stats.realloc_count++; + } else if (bi->page) { + rx_ring->rx_stats.page_reuse_count++; + } + if (!bi->page) { bi->page = alloc_page(GFP_ATOMIC); if (!bi->page) { rx_ring->rx_stats.alloc_page_failed++; goto no_buffers; } - } - - if (!bi->page_dma) { - /* use a half page if we're re-using */ - bi->page_offset ^= PAGE_SIZE / 2; bi->page_dma = dma_map_page(rx_ring->dev, bi->page, - bi->page_offset, - PAGE_SIZE / 2, + 0, + PAGE_SIZE, DMA_FROM_DEVICE); - if (dma_mapping_error(rx_ring->dev, - bi->page_dma)) { + if (dma_mapping_error(rx_ring->dev, bi->page_dma)) { rx_ring->rx_stats.alloc_page_failed++; + __free_page(bi->page); + bi->page = NULL; bi->page_dma = 0; + bi->page_offset = 0; goto no_buffers; } + bi->page_offset = 0; } - dma_sync_single_range_for_device(rx_ring->dev, - bi->dma, - 0, - rx_ring->rx_hdr_len, - DMA_FROM_DEVICE); /* Refresh the desc even if buffer_addrs didn't change * because each write-back erases this info. */ - rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma); + rx_desc->read.pkt_addr = + cpu_to_le64(bi->page_dma + bi->page_offset); rx_desc->read.hdr_addr = cpu_to_le64(bi->dma); i++; if (i == rx_ring->count) i = 0; } + if (rx_ring->next_to_use != i) + i40e_release_rx_desc(rx_ring, i); + + return false; + no_buffers: if (rx_ring->next_to_use != i) i40e_release_rx_desc(rx_ring, i); + + /* make sure to come back via polling to try again after + * allocation failure + */ + return true; } /** * i40evf_alloc_rx_buffers_1buf - Replace used receive buffers; single buffer * @rx_ring: ring to place buffers on * @cleaned_count: number of buffers to replace + * + * Returns true if any errors on allocation **/ -void i40evf_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count) +bool i40evf_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count) { u16 i = rx_ring->next_to_use; union i40e_rx_desc *rx_desc; @@ -716,7 +772,7 @@ void i40evf_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count) /* do nothing if no valid netdev defined */ if (!rx_ring->netdev || !cleaned_count) - return; + return false; while (cleaned_count--) { rx_desc = I40E_RX_DESC(rx_ring, i); @@ -724,8 +780,10 @@ void i40evf_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count) skb = bi->skb; if (!skb) { - skb = netdev_alloc_skb_ip_align(rx_ring->netdev, - rx_ring->rx_buf_len); + skb = __netdev_alloc_skb_ip_align(rx_ring->netdev, + rx_ring->rx_buf_len, + GFP_ATOMIC | + __GFP_NOWARN); if (!skb) { rx_ring->rx_stats.alloc_buff_failed++; goto no_buffers; @@ -743,6 +801,8 @@ void i40evf_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count) if (dma_mapping_error(rx_ring->dev, bi->dma)) { rx_ring->rx_stats.alloc_buff_failed++; bi->dma = 0; + dev_kfree_skb(bi->skb); + bi->skb = NULL; goto no_buffers; } } @@ -754,9 +814,19 @@ void i40evf_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count) i = 0; } + if (rx_ring->next_to_use != i) + i40e_release_rx_desc(rx_ring, i); + + return false; + no_buffers: if (rx_ring->next_to_use != i) i40e_release_rx_desc(rx_ring, i); + + /* make sure to come back via polling to try again after + * allocation 
failure + */ + return true; } /** @@ -791,16 +861,7 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi, u16 rx_ptype) { struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(rx_ptype); - bool ipv4 = false, ipv6 = false; - bool ipv4_tunnel, ipv6_tunnel; - __wsum rx_udp_csum; - struct iphdr *iph; - __sum16 csum; - - ipv4_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT4_MAC_PAY3) && - (rx_ptype <= I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4); - ipv6_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT6_MAC_PAY3) && - (rx_ptype <= I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4); + bool ipv4, ipv6, ipv4_tunnel, ipv6_tunnel; skb->ip_summed = CHECKSUM_NONE; @@ -816,12 +877,10 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi, if (!(decoded.known && decoded.outer_ip)) return; - if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP && - decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4) - ipv4 = true; - else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP && - decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6) - ipv6 = true; + ipv4 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) && + (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4); + ipv6 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) && + (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6); if (ipv4 && (rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) | @@ -845,36 +904,17 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi, if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT)) return; - /* If VXLAN traffic has an outer UDPv4 checksum we need to check - * it in the driver, hardware does not do it for us. - * Since L3L4P bit was set we assume a valid IHL value (>=5) - * so the total length of IPv4 header is IHL*4 bytes - * The UDP_0 bit *may* bet set if the *inner* header is UDP + /* The hardware supported by this driver does not validate outer + * checksums for tunneled VXLAN or GENEVE frames. I don't agree + * with it but the specification states that you "MAY validate", it + * doesn't make it a hard requirement so if we have validated the + * inner checksum report CHECKSUM_UNNECESSARY. */ - if (ipv4_tunnel) { - skb->transport_header = skb->mac_header + - sizeof(struct ethhdr) + - (ip_hdr(skb)->ihl * 4); - - /* Add 4 bytes for VLAN tagged packets */ - skb->transport_header += (skb->protocol == htons(ETH_P_8021Q) || - skb->protocol == htons(ETH_P_8021AD)) - ? 
VLAN_HLEN : 0; - - if ((ip_hdr(skb)->protocol == IPPROTO_UDP) && - (udp_hdr(skb)->check != 0)) { - rx_udp_csum = udp_csum(skb); - iph = ip_hdr(skb); - csum = csum_tcpudp_magic(iph->saddr, iph->daddr, - (skb->len - - skb_transport_offset(skb)), - IPPROTO_UDP, rx_udp_csum); - - if (udp_hdr(skb)->check != csum) - goto checksum_fail; - - } /* else its GRE and so no outer UDP header */ - } + + ipv4_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT4_MAC_PAY3) && + (rx_ptype <= I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4); + ipv6_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT6_MAC_PAY3) && + (rx_ptype <= I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4); skb->ip_summed = CHECKSUM_UNNECESSARY; skb->csum_level = ipv4_tunnel || ipv6_tunnel; @@ -886,31 +926,12 @@ checksum_fail: } /** - * i40e_rx_hash - returns the hash value from the Rx descriptor - * @ring: descriptor ring - * @rx_desc: specific descriptor - **/ -static inline u32 i40e_rx_hash(struct i40e_ring *ring, - union i40e_rx_desc *rx_desc) -{ - const __le64 rss_mask = - cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH << - I40E_RX_DESC_STATUS_FLTSTAT_SHIFT); - - if ((ring->netdev->features & NETIF_F_RXHASH) && - (rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) - return le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss); - else - return 0; -} - -/** - * i40e_ptype_to_hash - get a hash type + * i40e_ptype_to_htype - get a hash type * @ptype: the ptype value from the descriptor * * Returns a hash type to be used by skb_set_hash **/ -static inline enum pkt_hash_types i40e_ptype_to_hash(u8 ptype) +static inline enum pkt_hash_types i40e_ptype_to_htype(u8 ptype) { struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype); @@ -928,24 +949,49 @@ static inline enum pkt_hash_types i40e_ptype_to_hash(u8 ptype) } /** + * i40e_rx_hash - set the hash value in the skb + * @ring: descriptor ring + * @rx_desc: specific descriptor + * @skb: skb currently being received and modified + * @rx_ptype: the packet type decoded by hardware + **/ +static inline void i40e_rx_hash(struct i40e_ring *ring, + union i40e_rx_desc *rx_desc, + struct sk_buff *skb, + u8 rx_ptype) +{ + u32 hash; + const __le64 rss_mask = + cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH << + I40E_RX_DESC_STATUS_FLTSTAT_SHIFT); + + if (!(ring->netdev->features & NETIF_F_RXHASH)) + return; + + if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) { + hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss); + skb_set_hash(skb, hash, i40e_ptype_to_htype(rx_ptype)); + } +} + +/** * i40e_clean_rx_irq_ps - Reclaim resources after receive; packet split * @rx_ring: rx ring to clean * @budget: how many cleans we're allowed * * Returns true if there's any budget left (i.e.
the clean is finished) **/ -static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) +static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, const int budget) { unsigned int total_rx_bytes = 0, total_rx_packets = 0; u16 rx_packet_len, rx_header_len, rx_sph, rx_hbo; u16 cleaned_count = I40E_DESC_UNUSED(rx_ring); - const int current_node = numa_mem_id(); struct i40e_vsi *vsi = rx_ring->vsi; u16 i = rx_ring->next_to_clean; union i40e_rx_desc *rx_desc; u32 rx_error, rx_status; + bool failure = false; u8 rx_ptype; u64 qword; + u32 copysize; do { struct i40e_rx_buffer *rx_bi; @@ -953,7 +999,9 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) u16 vlan_tag; /* return some buffers to hardware, one at a time is too slow */ if (cleaned_count >= I40E_RX_BUFFER_WRITE) { - i40evf_alloc_rx_buffers_ps(rx_ring, cleaned_count); + failure = failure || + i40evf_alloc_rx_buffers_ps(rx_ring, + cleaned_count); cleaned_count = 0; } @@ -971,13 +1019,22 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) * DD bit is set. */ dma_rmb(); + /* sync header buffer for reading */ + dma_sync_single_range_for_cpu(rx_ring->dev, + rx_ring->rx_bi[0].dma, + i * rx_ring->rx_hdr_len, + rx_ring->rx_hdr_len, + DMA_FROM_DEVICE); rx_bi = &rx_ring->rx_bi[i]; skb = rx_bi->skb; if (likely(!skb)) { - skb = netdev_alloc_skb_ip_align(rx_ring->netdev, - rx_ring->rx_hdr_len); + skb = __netdev_alloc_skb_ip_align(rx_ring->netdev, + rx_ring->rx_hdr_len, + GFP_ATOMIC | + __GFP_NOWARN); if (!skb) { rx_ring->rx_stats.alloc_buff_failed++; + failure = true; break; } @@ -985,8 +1042,8 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) skb_record_rx_queue(skb, rx_ring->queue_index); /* we are reusing so sync this buffer for CPU use */ dma_sync_single_range_for_cpu(rx_ring->dev, - rx_bi->dma, - 0, + rx_ring->rx_bi[0].dma, + i * rx_ring->rx_hdr_len, rx_ring->rx_hdr_len, DMA_FROM_DEVICE); } @@ -1004,9 +1061,16 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT; - prefetch(rx_bi->page); + /* sync half-page for reading */ + dma_sync_single_range_for_cpu(rx_ring->dev, + rx_bi->page_dma, + rx_bi->page_offset, + PAGE_SIZE / 2, + DMA_FROM_DEVICE); + prefetch(page_address(rx_bi->page) + rx_bi->page_offset); rx_bi->skb = NULL; cleaned_count++; + copysize = 0; if (rx_hbo || rx_sph) { int len; @@ -1017,38 +1081,50 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) memcpy(__skb_put(skb, len), rx_bi->hdr_buf, len); } else if (skb->len == 0) { int len; + unsigned char *va = page_address(rx_bi->page) + + rx_bi->page_offset; - len = (rx_packet_len > skb_headlen(skb) ? 
- skb_headlen(skb) : rx_packet_len); - memcpy(__skb_put(skb, len), - rx_bi->page + rx_bi->page_offset, - len); - rx_bi->page_offset += len; + len = min(rx_packet_len, rx_ring->rx_hdr_len); + memcpy(__skb_put(skb, len), va, len); + copysize = len; rx_packet_len -= len; } - /* Get the rest of the data if this was a header split */ if (rx_packet_len) { - skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, - rx_bi->page, - rx_bi->page_offset, - rx_packet_len); - - skb->len += rx_packet_len; - skb->data_len += rx_packet_len; - skb->truesize += rx_packet_len; - - if ((page_count(rx_bi->page) == 1) && - (page_to_nid(rx_bi->page) == current_node)) - get_page(rx_bi->page); - else + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, + rx_bi->page, + rx_bi->page_offset + copysize, + rx_packet_len, I40E_RXBUFFER_2048); + + /* If the page count is more than 2, then both halves + * of the page are used and we need to free it. Do it + * here instead of in the alloc code. Otherwise one + * of the half-pages might be released between now and + * then, and we wouldn't know which one to use. + * Don't call get_page and free_page since those are + * both expensive atomic operations that just change + * the refcount in opposite directions. Just give the + * page to the stack; he can have our refcount. + */ + if (page_count(rx_bi->page) > 2) { + dma_unmap_page(rx_ring->dev, + rx_bi->page_dma, + PAGE_SIZE, + DMA_FROM_DEVICE); rx_bi->page = NULL; + rx_bi->page_dma = 0; + rx_ring->rx_stats.realloc_count++; + } else { + get_page(rx_bi->page); + /* switch to the other half-page here; the + * allocation code programs the right addr + * into HW. If we haven't used this half-page, + * the address won't be changed, and HW can + * just use it next time through. + */ + rx_bi->page_offset ^= PAGE_SIZE / 2; + } - dma_unmap_page(rx_ring->dev, - rx_bi->page_dma, - PAGE_SIZE / 2, - DMA_FROM_DEVICE); - rx_bi->page_dma = 0; } I40E_RX_INCREMENT(rx_ring, i); @@ -1068,8 +1144,8 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) continue; } - skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc), - i40e_ptype_to_hash(rx_ptype)); + i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype); + /* probably a little skewed due to removing CRC */ total_rx_bytes += skb->len; total_rx_packets++; @@ -1100,7 +1176,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) rx_ring->q_vector->rx.total_packets += total_rx_packets; rx_ring->q_vector->rx.total_bytes += total_rx_bytes; - return total_rx_packets; + return failure ? 
budget : total_rx_packets; } /** @@ -1118,6 +1194,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget) union i40e_rx_desc *rx_desc; u32 rx_error, rx_status; u16 rx_packet_len; + bool failure = false; u8 rx_ptype; u64 qword; u16 i; @@ -1128,7 +1205,9 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget) u16 vlan_tag; /* return some buffers to hardware, one at a time is too slow */ if (cleaned_count >= I40E_RX_BUFFER_WRITE) { - i40evf_alloc_rx_buffers_1buf(rx_ring, cleaned_count); + failure = failure || + i40evf_alloc_rx_buffers_1buf(rx_ring, + cleaned_count); cleaned_count = 0; } @@ -1185,8 +1264,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget) continue; } - skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc), - i40e_ptype_to_hash(rx_ptype)); + i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype); /* probably a little skewed due to removing CRC */ total_rx_bytes += skb->len; total_rx_packets++; @@ -1210,7 +1288,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget) rx_ring->q_vector->rx.total_packets += total_rx_packets; rx_ring->q_vector->rx.total_bytes += total_rx_bytes; - return total_rx_packets; + return failure ? budget : total_rx_packets; } static u32 i40e_buildreg_itr(const int type, const u16 itr) @@ -1218,7 +1296,9 @@ static u32 i40e_buildreg_itr(const int type, const u16 itr) u32 val; val = I40E_VFINT_DYN_CTLN1_INTENA_MASK | - I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK | + /* Don't clear PBA because that can cause lost interrupts that + * came in while we were cleaning/polling + */ (type << I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) | (itr << I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT); @@ -1331,7 +1411,8 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget) * budget and be more aggressive about cleaning up the Tx descriptors. */ i40e_for_each_ring(ring, q_vector->tx) { - clean_complete &= i40e_clean_tx_irq(ring, vsi->work_limit); + clean_complete = clean_complete && + i40e_clean_tx_irq(ring, vsi->work_limit); arm_wb = arm_wb || ring->arm_wb; ring->arm_wb = false; } @@ -1355,7 +1436,7 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget) work_done += cleaned; /* if we didn't clean as many as budgeted, we must be done */ - clean_complete &= (budget_per_ring != cleaned); + clean_complete = clean_complete && (budget_per_ring > cleaned); } /* If work not completed, return budget and polling will return */ @@ -1363,7 +1444,7 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget) tx_only: if (arm_wb) { q_vector->tx.ring[0].tx_stats.tx_force_wb++; - i40evf_force_wb(vsi, q_vector); + i40e_enable_wb_on_itr(vsi, q_vector); } return budget; } @@ -1443,13 +1524,23 @@ out: static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb, u8 *hdr_len, u64 *cd_type_cmd_tso_mss) { - u32 cd_cmd, cd_tso_len, cd_mss; - struct ipv6hdr *ipv6h; - struct tcphdr *tcph; - struct iphdr *iph; - u32 l4len; + u64 cd_cmd, cd_tso_len, cd_mss; + union { + struct iphdr *v4; + struct ipv6hdr *v6; + unsigned char *hdr; + } ip; + union { + struct tcphdr *tcp; + struct udphdr *udp; + unsigned char *hdr; + } l4; + u32 paylen, l4_offset; int err; + if (skb->ip_summed != CHECKSUM_PARTIAL) + return 0; + if (!skb_is_gso(skb)) return 0; @@ -1457,35 +1548,60 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb, if (err < 0) return err; - iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb); - ipv6h = skb->encapsulation ? 
inner_ipv6_hdr(skb) : ipv6_hdr(skb); - - if (iph->version == 4) { - tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb); - iph->tot_len = 0; - iph->check = 0; - tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, - 0, IPPROTO_TCP, 0); - } else if (ipv6h->version == 6) { - tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb); - ipv6h->payload_len = 0; - tcph->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, - 0, IPPROTO_TCP, 0); + ip.hdr = skb_network_header(skb); + l4.hdr = skb_transport_header(skb); + + /* initialize outer IP header fields */ + if (ip.v4->version == 4) { + ip.v4->tot_len = 0; + ip.v4->check = 0; + } else { + ip.v6->payload_len = 0; } - l4len = skb->encapsulation ? inner_tcp_hdrlen(skb) : tcp_hdrlen(skb); - *hdr_len = (skb->encapsulation - ? (skb_inner_transport_header(skb) - skb->data) - : skb_transport_offset(skb)) + l4len; + if (skb_shinfo(skb)->gso_type & (SKB_GSO_UDP_TUNNEL | SKB_GSO_GRE | + SKB_GSO_UDP_TUNNEL_CSUM)) { + if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM) { + /* determine offset of outer transport header */ + l4_offset = l4.hdr - skb->data; + + /* remove payload length from outer checksum */ + paylen = (__force u16)l4.udp->check; + paylen += ntohs(1) * (u16)~(skb->len - l4_offset); + l4.udp->check = ~csum_fold((__force __wsum)paylen); + } + + /* reset pointers to inner headers */ + ip.hdr = skb_inner_network_header(skb); + l4.hdr = skb_inner_transport_header(skb); + + /* initialize inner IP header fields */ + if (ip.v4->version == 4) { + ip.v4->tot_len = 0; + ip.v4->check = 0; + } else { + ip.v6->payload_len = 0; + } + } + + /* determine offset of inner transport header */ + l4_offset = l4.hdr - skb->data; + + /* remove payload length from inner checksum */ + paylen = (__force u16)l4.tcp->check; + paylen += ntohs(1) * (u16)~(skb->len - l4_offset); + l4.tcp->check = ~csum_fold((__force __wsum)paylen); + + /* compute length of segmentation header */ + *hdr_len = (l4.tcp->doff * 4) + l4_offset; /* find the field values */ cd_cmd = I40E_TX_CTX_DESC_TSO; cd_tso_len = skb->len - *hdr_len; cd_mss = skb_shinfo(skb)->gso_size; - *cd_type_cmd_tso_mss |= ((u64)cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) | - ((u64)cd_tso_len << - I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) | - ((u64)cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT); + *cd_type_cmd_tso_mss |= (cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) | + (cd_tso_len << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) | + (cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT); return 1; } @@ -1495,129 +1611,157 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb, * @tx_flags: pointer to Tx flags currently set * @td_cmd: Tx descriptor command bits to set * @td_offset: Tx descriptor header offsets to set + * @tx_ring: Tx descriptor ring * @cd_tunneling: ptr to context desc bits **/ -static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, - u32 *td_cmd, u32 *td_offset, - struct i40e_ring *tx_ring, - u32 *cd_tunneling) +static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, + u32 *td_cmd, u32 *td_offset, + struct i40e_ring *tx_ring, + u32 *cd_tunneling) { - struct ipv6hdr *this_ipv6_hdr; - unsigned int this_tcp_hdrlen; - struct iphdr *this_ip_hdr; - u32 network_hdr_len; - u8 l4_hdr = 0; - struct udphdr *oudph; - struct iphdr *oiph; - u32 l4_tunnel = 0; + union { + struct iphdr *v4; + struct ipv6hdr *v6; + unsigned char *hdr; + } ip; + union { + struct tcphdr *tcp; + struct udphdr *udp; + unsigned char *hdr; + } l4; + unsigned char *exthdr; + u32 offset, cmd = 0, tunnel = 0; + __be16 frag_off; + u8 l4_proto = 
0; + + if (skb->ip_summed != CHECKSUM_PARTIAL) + return 0; + + ip.hdr = skb_network_header(skb); + l4.hdr = skb_transport_header(skb); + + /* compute outer L2 header size */ + offset = ((ip.hdr - skb->data) / 2) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT; if (skb->encapsulation) { - switch (ip_hdr(skb)->protocol) { + /* define outer network header type */ + if (*tx_flags & I40E_TX_FLAGS_IPV4) { + tunnel |= (*tx_flags & I40E_TX_FLAGS_TSO) ? + I40E_TX_CTX_EXT_IP_IPV4 : + I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM; + + l4_proto = ip.v4->protocol; + } else if (*tx_flags & I40E_TX_FLAGS_IPV6) { + tunnel |= I40E_TX_CTX_EXT_IP_IPV6; + + exthdr = ip.hdr + sizeof(*ip.v6); + l4_proto = ip.v6->nexthdr; + if (l4.hdr != exthdr) + ipv6_skip_exthdr(skb, exthdr - skb->data, + &l4_proto, &frag_off); + } + + /* compute outer L3 header size */ + tunnel |= ((l4.hdr - ip.hdr) / 4) << + I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT; + + /* switch IP header pointer from outer to inner header */ + ip.hdr = skb_inner_network_header(skb); + + /* define outer transport */ + switch (l4_proto) { case IPPROTO_UDP: - oudph = udp_hdr(skb); - oiph = ip_hdr(skb); - l4_tunnel = I40E_TXD_CTX_UDP_TUNNELING; + tunnel |= I40E_TXD_CTX_UDP_TUNNELING; + *tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL; + break; + case IPPROTO_GRE: + tunnel |= I40E_TXD_CTX_GRE_TUNNELING; *tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL; break; default: - return; - } - network_hdr_len = skb_inner_network_header_len(skb); - this_ip_hdr = inner_ip_hdr(skb); - this_ipv6_hdr = inner_ipv6_hdr(skb); - this_tcp_hdrlen = inner_tcp_hdrlen(skb); - - if (*tx_flags & I40E_TX_FLAGS_IPV4) { - if (*tx_flags & I40E_TX_FLAGS_TSO) { - *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4; - ip_hdr(skb)->check = 0; - } else { - *cd_tunneling |= - I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM; - } - } else if (*tx_flags & I40E_TX_FLAGS_IPV6) { - *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6; if (*tx_flags & I40E_TX_FLAGS_TSO) - ip_hdr(skb)->check = 0; - } + return -1; - /* Now set the ctx descriptor fields */ - *cd_tunneling |= (skb_network_header_len(skb) >> 2) << - I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT | - l4_tunnel | - ((skb_inner_network_offset(skb) - - skb_transport_offset(skb)) >> 1) << - I40E_TXD_CTX_QW0_NATLEN_SHIFT; - if (this_ip_hdr->version == 6) { - *tx_flags &= ~I40E_TX_FLAGS_IPV4; - *tx_flags |= I40E_TX_FLAGS_IPV6; + skb_checksum_help(skb); + return 0; } - if ((tx_ring->flags & I40E_TXR_FLAGS_OUTER_UDP_CSUM) && - (l4_tunnel == I40E_TXD_CTX_UDP_TUNNELING) && - (*cd_tunneling & I40E_TXD_CTX_QW0_EXT_IP_MASK)) { - oudph->check = ~csum_tcpudp_magic(oiph->saddr, - oiph->daddr, - (skb->len - skb_transport_offset(skb)), - IPPROTO_UDP, 0); - *cd_tunneling |= I40E_TXD_CTX_QW0_L4T_CS_MASK; - } - } else { - network_hdr_len = skb_network_header_len(skb); - this_ip_hdr = ip_hdr(skb); - this_ipv6_hdr = ipv6_hdr(skb); - this_tcp_hdrlen = tcp_hdrlen(skb); + /* compute tunnel header size */ + tunnel |= ((ip.hdr - l4.hdr) / 2) << + I40E_TXD_CTX_QW0_NATLEN_SHIFT; + + /* indicate if we need to offload outer UDP header */ + if ((*tx_flags & I40E_TX_FLAGS_TSO) && + (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) + tunnel |= I40E_TXD_CTX_QW0_L4T_CS_MASK; + + /* record tunnel offload values */ + *cd_tunneling |= tunnel; + + /* switch L4 header pointer from outer to inner */ + l4.hdr = skb_inner_transport_header(skb); + l4_proto = 0; + + /* reset type as we transition from outer to inner headers */ + *tx_flags &= ~(I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6); + if (ip.v4->version == 4) + *tx_flags |= I40E_TX_FLAGS_IPV4; + if (ip.v6->version == 6) + *tx_flags |= 
I40E_TX_FLAGS_IPV6; } /* Enable IP checksum offloads */ if (*tx_flags & I40E_TX_FLAGS_IPV4) { - l4_hdr = this_ip_hdr->protocol; + l4_proto = ip.v4->protocol; /* the stack computes the IP header already, the only time we * need the hardware to recompute it is in the case of TSO. */ - if (*tx_flags & I40E_TX_FLAGS_TSO) { - *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM; - this_ip_hdr->check = 0; - } else { - *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4; - } - /* Now set the td_offset for IP header length */ - *td_offset = (network_hdr_len >> 2) << - I40E_TX_DESC_LENGTH_IPLEN_SHIFT; + cmd |= (*tx_flags & I40E_TX_FLAGS_TSO) ? + I40E_TX_DESC_CMD_IIPT_IPV4_CSUM : + I40E_TX_DESC_CMD_IIPT_IPV4; } else if (*tx_flags & I40E_TX_FLAGS_IPV6) { - l4_hdr = this_ipv6_hdr->nexthdr; - *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV6; - /* Now set the td_offset for IP header length */ - *td_offset = (network_hdr_len >> 2) << - I40E_TX_DESC_LENGTH_IPLEN_SHIFT; + cmd |= I40E_TX_DESC_CMD_IIPT_IPV6; + + exthdr = ip.hdr + sizeof(*ip.v6); + l4_proto = ip.v6->nexthdr; + if (l4.hdr != exthdr) + ipv6_skip_exthdr(skb, exthdr - skb->data, + &l4_proto, &frag_off); } - /* words in MACLEN + dwords in IPLEN + dwords in L4Len */ - *td_offset |= (skb_network_offset(skb) >> 1) << - I40E_TX_DESC_LENGTH_MACLEN_SHIFT; + + /* compute inner L3 header size */ + offset |= ((l4.hdr - ip.hdr) / 4) << I40E_TX_DESC_LENGTH_IPLEN_SHIFT; /* Enable L4 checksum offloads */ - switch (l4_hdr) { + switch (l4_proto) { case IPPROTO_TCP: /* enable checksum offloads */ - *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP; - *td_offset |= (this_tcp_hdrlen >> 2) << - I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; + cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP; + offset |= l4.tcp->doff << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; break; case IPPROTO_SCTP: /* enable SCTP checksum offload */ - *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP; - *td_offset |= (sizeof(struct sctphdr) >> 2) << - I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; + cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP; + offset |= (sizeof(struct sctphdr) >> 2) << + I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; break; case IPPROTO_UDP: /* enable UDP checksum offload */ - *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP; - *td_offset |= (sizeof(struct udphdr) >> 2) << - I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; + cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP; + offset |= (sizeof(struct udphdr) >> 2) << + I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; break; default: - break; + if (*tx_flags & I40E_TX_FLAGS_TSO) + return -1; + skb_checksum_help(skb); + return 0; } + + *td_cmd |= cmd; + *td_offset |= offset; + + return 1; } /** @@ -2033,12 +2177,10 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, td_cmd |= I40E_TX_DESC_CMD_ICRC; /* Always offload the checksum, since it's in the data descriptor */ - if (skb->ip_summed == CHECKSUM_PARTIAL) { - tx_flags |= I40E_TX_FLAGS_CSUM; - - i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset, - tx_ring, &cd_tunneling); - } + tso = i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset, + tx_ring, &cd_tunneling); + if (tso < 0) + goto out_drop; i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss, cd_tunneling, cd_l2tag2); diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h index e29bb3e86cfd..6ea8701cf066 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver - * Copyright(c) 2013 
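The paylen arithmetic in the rewritten i40e_tso() above is easy to misread. The stack seeds l4.tcp->check with a pseudo-header checksum that includes the full L4 length, and the hardware re-adds a per-segment length itself, so the driver folds the length back out by summing in its ones' complement. Multiplying by ntohs(1) (1 on big-endian, 0x0100 on little-endian) byte-swaps the value modulo 0xffff exactly where needed. A rough user-space sketch of the same identity, with made-up sample values rather than a real skb:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

/* Fold a 32-bit ones'-complement accumulator down to 16 bits; the
 * result here corresponds to ~csum_fold() in the driver, since the
 * kernel's csum_fold() returns the complement of the folded sum.
 */
static uint16_t fold(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

int main(void)
{
	uint16_t csum_with_len = 0xa1b2;  /* sample seeded ~pseudo csum */
	uint16_t l4_len = 1448;           /* sample length to remove */
	uint32_t paylen = csum_with_len;

	/* ntohs(1) is 1 (BE) or 0x0100 (LE); multiplying by 0x0100 is a
	 * byte swap modulo 0xffff, so ~l4_len lands in network byte
	 * order on either endianness before being summed in.
	 */
	paylen += ntohs(1) * (uint16_t)~l4_len;

	printf("check after removing length: 0x%04x\n", fold(paylen));
	return 0;
}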
- 2014 Intel Corporation. + * Copyright(c) 2013 - 2016 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -153,7 +153,6 @@ enum i40e_dyn_idx_t { #define DESC_NEEDED (MAX_SKB_FRAGS + 4) #define I40E_MIN_DESC_PENDING 4 -#define I40E_TX_FLAGS_CSUM BIT(0) #define I40E_TX_FLAGS_HW_VLAN BIT(1) #define I40E_TX_FLAGS_SW_VLAN BIT(2) #define I40E_TX_FLAGS_TSO BIT(3) @@ -202,12 +201,15 @@ struct i40e_tx_queue_stats { u64 tx_done_old; u64 tx_linearize; u64 tx_force_wb; + u64 tx_lost_interrupt; }; struct i40e_rx_queue_stats { u64 non_eop_descs; u64 alloc_page_failed; u64 alloc_buff_failed; + u64 page_reuse_count; + u64 realloc_count; }; enum i40e_ring_state_t { @@ -253,7 +255,6 @@ struct i40e_ring { #define I40E_RX_DTYPE_NO_SPLIT 0 #define I40E_RX_DTYPE_HEADER_SPLIT 1 #define I40E_RX_DTYPE_SPLIT_ALWAYS 2 - u8 hsplit; #define I40E_RX_SPLIT_L2 0x1 #define I40E_RX_SPLIT_IP 0x2 #define I40E_RX_SPLIT_TCP_UDP 0x4 @@ -273,7 +274,6 @@ struct i40e_ring { u16 flags; #define I40E_TXR_FLAGS_WB_ON_ITR BIT(0) -#define I40E_TXR_FLAGS_OUTER_UDP_CSUM BIT(1) /* stats structs */ struct i40e_queue_stats stats; @@ -313,8 +313,8 @@ struct i40e_ring_container { #define i40e_for_each_ring(pos, head) \ for (pos = (head).ring; pos != NULL; pos = pos->next) -void i40evf_alloc_rx_buffers_ps(struct i40e_ring *rxr, u16 cleaned_count); -void i40evf_alloc_rx_buffers_1buf(struct i40e_ring *rxr, u16 cleaned_count); +bool i40evf_alloc_rx_buffers_ps(struct i40e_ring *rxr, u16 cleaned_count); +bool i40evf_alloc_rx_buffers_1buf(struct i40e_ring *rxr, u16 cleaned_count); void i40evf_alloc_rx_headers(struct i40e_ring *rxr); netdev_tx_t i40evf_xmit_frame(struct sk_buff *skb, struct net_device *netdev); void i40evf_clean_tx_ring(struct i40e_ring *tx_ring); @@ -324,7 +324,8 @@ int i40evf_setup_rx_descriptors(struct i40e_ring *rx_ring); void i40evf_free_tx_resources(struct i40e_ring *tx_ring); void i40evf_free_rx_resources(struct i40e_ring *rx_ring); int i40evf_napi_poll(struct napi_struct *napi, int budget); -u32 i40evf_get_tx_pending(struct i40e_ring *ring); +void i40evf_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector); +u32 i40evf_get_tx_pending(struct i40e_ring *ring, bool in_sw); /** * i40e_get_head - Retrieve head from head writeback diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h index be1b72b93888..e657eccd232c 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf.h +++ b/drivers/net/ethernet/intel/i40evf/i40evf.h @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver - * Copyright(c) 2013 - 2014 Intel Corporation. + * Copyright(c) 2013 - 2016 Intel Corporation. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -173,6 +173,7 @@ enum i40evf_state_t { __I40EVF_RESETTING, /* in reset */ /* Below here, watchdog is running */ __I40EVF_DOWN, /* ready, can be opened */ + __I40EVF_DOWN_PENDING, /* descending, waiting for watchdog */ __I40EVF_TESTING, /* in ethtool self-test */ __I40EVF_RUNNING, /* opened, working */ }; @@ -273,6 +274,9 @@ struct i40evf_adapter { }; +/* Ethtool Private Flags */ +#define I40EVF_PRIV_FLAGS_PS BIT(0) + /* needed by i40evf_ethtool.c */ extern char i40evf_driver_name[]; extern const char i40evf_driver_version[]; @@ -280,6 +284,7 @@ extern const char i40evf_driver_version[]; int i40evf_up(struct i40evf_adapter *adapter); void i40evf_down(struct i40evf_adapter *adapter); int i40evf_process_config(struct i40evf_adapter *adapter); +void i40evf_schedule_reset(struct i40evf_adapter *adapter); void i40evf_reset(struct i40evf_adapter *adapter); void i40evf_set_ethtool_ops(struct net_device *netdev); void i40evf_update_stats(struct i40evf_adapter *adapter); diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c index a4c9feb589e7..dd4430aae7fa 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver - * Copyright(c) 2013 - 2015 Intel Corporation. + * Copyright(c) 2013 - 2016 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -63,6 +63,12 @@ static const struct i40evf_stats i40evf_gstrings_stats[] = { #define I40EVF_STATS_LEN(_dev) \ (I40EVF_GLOBAL_STATS_LEN + I40EVF_QUEUE_STATS_LEN(_dev)) +static const char i40evf_priv_flags_strings[][ETH_GSTRING_LEN] = { + "packet-split", +}; + +#define I40EVF_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40evf_priv_flags_strings) + /** * i40evf_get_settings - Get Link Speed and Duplex settings * @netdev: network interface device structure @@ -97,6 +103,8 @@ static int i40evf_get_sset_count(struct net_device *netdev, int sset) { if (sset == ETH_SS_STATS) return I40EVF_STATS_LEN(netdev); + else if (sset == ETH_SS_PRIV_FLAGS) + return I40EVF_PRIV_FLAGS_STR_LEN; else return -EINVAL; } @@ -162,6 +170,12 @@ static void i40evf_get_strings(struct net_device *netdev, u32 sset, u8 *data) snprintf(p, ETH_GSTRING_LEN, "rx-%u.bytes", i); p += ETH_GSTRING_LEN; } + } else if (sset == ETH_SS_PRIV_FLAGS) { + for (i = 0; i < I40EVF_PRIV_FLAGS_STR_LEN; i++) { + memcpy(data, i40evf_priv_flags_strings[i], + ETH_GSTRING_LEN); + data += ETH_GSTRING_LEN; + } } } @@ -211,6 +225,7 @@ static void i40evf_get_drvinfo(struct net_device *netdev, strlcpy(drvinfo->version, i40evf_driver_version, 32); strlcpy(drvinfo->fw_version, "N/A", 4); strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 32); + drvinfo->n_priv_flags = I40EVF_PRIV_FLAGS_STR_LEN; } /** @@ -459,6 +474,7 @@ static int i40evf_set_rss_hash_opt(struct i40evf_adapter *adapter, struct ethtool_rxnfc *nfc) { struct i40e_hw *hw = &adapter->hw; + u32 flags = adapter->vf_res->vf_offload_flags; u64 hena = (u64)rd32(hw, I40E_VFQF_HENA(0)) | ((u64)rd32(hw, I40E_VFQF_HENA(1)) << 32); @@ -477,54 +493,50 @@ static int i40evf_set_rss_hash_opt(struct i40evf_adapter *adapter, switch (nfc->flow_type) { 
case TCP_V4_FLOW: - switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { - case 0: - hena &= ~BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP); - break; - case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + if (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { + if (flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2) + hena |= + BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK); + hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP); - break; - default: + } else { return -EINVAL; } break; case TCP_V6_FLOW: - switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { - case 0: - hena &= ~BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP); - break; - case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + if (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { + if (flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2) + hena |= + BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK); + hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP); - break; - default: + } else { return -EINVAL; } break; case UDP_V4_FLOW: - switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { - case 0: - hena &= ~(BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | - BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4)); - break; - case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + if (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { + if (flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2) + hena |= + BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | + BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP); + hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4)); - break; - default: + } else { return -EINVAL; } break; case UDP_V6_FLOW: - switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { - case 0: - hena &= ~(BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | - BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6)); - break; - case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + if (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { + if (flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2) + hena |= + BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | + BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP); + hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6)); - break; - default: + } else { return -EINVAL; } break; @@ -713,6 +725,54 @@ static int i40evf_set_rxfh(struct net_device *netdev, const u32 *indir, I40EVF_HLUT_ARRAY_SIZE); } +/** + * i40evf_get_priv_flags - report device private flags + * @dev: network interface device structure + * + * The get string set count and the string set should be matched for each + * flag returned. Add new strings for each flag to the i40e_priv_flags_strings + * array. + * + * Returns a u32 bitmap of flags. + **/ +static u32 i40evf_get_priv_flags(struct net_device *dev) +{ + struct i40evf_adapter *adapter = netdev_priv(dev); + u32 ret_flags = 0; + + ret_flags |= adapter->flags & I40EVF_FLAG_RX_PS_ENABLED ? 
+ I40EVF_PRIV_FLAGS_PS : 0; + + return ret_flags; +} + +/** + * i40evf_set_priv_flags - set private flags + * @dev: network interface device structure + * @flags: bit flags to be set + **/ +static int i40evf_set_priv_flags(struct net_device *dev, u32 flags) +{ + struct i40evf_adapter *adapter = netdev_priv(dev); + bool reset_required = false; + + if ((flags & I40EVF_PRIV_FLAGS_PS) && + !(adapter->flags & I40EVF_FLAG_RX_PS_ENABLED)) { + adapter->flags |= I40EVF_FLAG_RX_PS_ENABLED; + reset_required = true; + } else if (!(flags & I40EVF_PRIV_FLAGS_PS) && + (adapter->flags & I40EVF_FLAG_RX_PS_ENABLED)) { + adapter->flags &= ~I40EVF_FLAG_RX_PS_ENABLED; + reset_required = true; + } + + /* if needed, issue reset to cause things to take effect */ + if (reset_required) + i40evf_schedule_reset(adapter); + + return 0; +} + static const struct ethtool_ops i40evf_ethtool_ops = { .get_settings = i40evf_get_settings, .get_drvinfo = i40evf_get_drvinfo, @@ -722,6 +782,8 @@ static const struct ethtool_ops i40evf_ethtool_ops = { .get_strings = i40evf_get_strings, .get_ethtool_stats = i40evf_get_ethtool_stats, .get_sset_count = i40evf_get_sset_count, + .get_priv_flags = i40evf_get_priv_flags, + .set_priv_flags = i40evf_set_priv_flags, .get_msglevel = i40evf_get_msglevel, .set_msglevel = i40evf_set_msglevel, .get_coalesce = i40evf_get_coalesce, diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index a0bbb6b9f5aa..3396fe32cc6d 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver - * Copyright(c) 2013 - 2015 Intel Corporation. + * Copyright(c) 2013 - 2016 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -32,13 +32,13 @@ static int i40evf_close(struct net_device *netdev); char i40evf_driver_name[] = "i40evf"; static const char i40evf_driver_string[] = - "Intel(R) XL710/X710 Virtual Function Network Driver"; + "Intel(R) 40-10 Gigabit Virtual Function Network Driver"; #define DRV_KERN "-k" #define DRV_VERSION_MAJOR 1 #define DRV_VERSION_MINOR 4 -#define DRV_VERSION_BUILD 4 +#define DRV_VERSION_BUILD 11 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ __stringify(DRV_VERSION_MINOR) "." \ __stringify(DRV_VERSION_BUILD) \ @@ -69,6 +69,8 @@ MODULE_DESCRIPTION("Intel(R) XL710 X710 Virtual Function Network Driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_VERSION); +static struct workqueue_struct *i40evf_wq; + /** * i40evf_allocate_dma_mem_d - OS specific memory alloc for shared code * @hw: pointer to the HW structure @@ -171,6 +173,19 @@ void i40evf_debug_d(void *hw, u32 mask, char *fmt_str, ...) 
} /** + * i40evf_schedule_reset - Set the flags and schedule a reset event + * @adapter: board private structure + **/ +void i40evf_schedule_reset(struct i40evf_adapter *adapter) +{ + if (!(adapter->flags & + (I40EVF_FLAG_RESET_PENDING | I40EVF_FLAG_RESET_NEEDED))) { + adapter->flags |= I40EVF_FLAG_RESET_NEEDED; + schedule_work(&adapter->reset_task); + } +} + +/** * i40evf_tx_timeout - Respond to a Tx Hang * @netdev: network interface device structure **/ @@ -179,11 +194,7 @@ static void i40evf_tx_timeout(struct net_device *netdev) struct i40evf_adapter *adapter = netdev_priv(netdev); adapter->tx_timeout_count++; - if (!(adapter->flags & (I40EVF_FLAG_RESET_PENDING | - I40EVF_FLAG_RESET_NEEDED))) { - adapter->flags |= I40EVF_FLAG_RESET_NEEDED; - schedule_work(&adapter->reset_task); - } + i40evf_schedule_reset(adapter); } /** @@ -636,35 +647,22 @@ static void i40evf_configure_rx(struct i40evf_adapter *adapter) int rx_buf_len; - adapter->flags &= ~I40EVF_FLAG_RX_PS_CAPABLE; - adapter->flags |= I40EVF_FLAG_RX_1BUF_CAPABLE; - - /* Decide whether to use packet split mode or not */ - if (netdev->mtu > ETH_DATA_LEN) { - if (adapter->flags & I40EVF_FLAG_RX_PS_CAPABLE) - adapter->flags |= I40EVF_FLAG_RX_PS_ENABLED; - else - adapter->flags &= ~I40EVF_FLAG_RX_PS_ENABLED; - } else { - if (adapter->flags & I40EVF_FLAG_RX_1BUF_CAPABLE) - adapter->flags &= ~I40EVF_FLAG_RX_PS_ENABLED; - else - adapter->flags |= I40EVF_FLAG_RX_PS_ENABLED; - } - /* Set the RX buffer length according to the mode */ - if (adapter->flags & I40EVF_FLAG_RX_PS_ENABLED) { - rx_buf_len = I40E_RX_HDR_SIZE; - } else { - if (netdev->mtu <= ETH_DATA_LEN) - rx_buf_len = I40EVF_RXBUFFER_2048; - else - rx_buf_len = ALIGN(max_frame, 1024); - } + if (adapter->flags & I40EVF_FLAG_RX_PS_ENABLED || + netdev->mtu <= ETH_DATA_LEN) + rx_buf_len = I40EVF_RXBUFFER_2048; + else + rx_buf_len = ALIGN(max_frame, 1024); for (i = 0; i < adapter->num_active_queues; i++) { adapter->rx_rings[i].tail = hw->hw_addr + I40E_QRX_TAIL1(i); adapter->rx_rings[i].rx_buf_len = rx_buf_len; + if (adapter->flags & I40EVF_FLAG_RX_PS_ENABLED) { + set_ring_ps_enabled(&adapter->rx_rings[i]); + adapter->rx_rings[i].rx_hdr_len = I40E_RX_HDR_SIZE; + } else { + clear_ring_ps_enabled(&adapter->rx_rings[i]); + } } } @@ -1001,7 +999,12 @@ static void i40evf_configure(struct i40evf_adapter *adapter) for (i = 0; i < adapter->num_active_queues; i++) { struct i40e_ring *ring = &adapter->rx_rings[i]; + if (adapter->flags & I40EVF_FLAG_RX_PS_ENABLED) { + i40evf_alloc_rx_headers(ring); + i40evf_alloc_rx_buffers_ps(ring, ring->count); + } else { i40evf_alloc_rx_buffers_1buf(ring, ring->count); + } ring->next_to_use = ring->count - 1; writel(ring->next_to_use, ring->tail); } @@ -1032,7 +1035,7 @@ void i40evf_down(struct i40evf_adapter *adapter) struct net_device *netdev = adapter->netdev; struct i40evf_mac_filter *f; - if (adapter->state == __I40EVF_DOWN) + if (adapter->state <= __I40EVF_DOWN_PENDING) return; while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, @@ -1122,7 +1125,9 @@ static void i40evf_free_queues(struct i40evf_adapter *adapter) if (!adapter->vsi_res) return; kfree(adapter->tx_rings); + adapter->tx_rings = NULL; kfree(adapter->rx_rings); + adapter->rx_rings = NULL; } /** @@ -1239,7 +1244,7 @@ static int i40evf_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed, if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ - dev_err(&adapter->pdev->dev, "Cannot confiure RSS, command %d pending\n", + dev_err(&adapter->pdev->dev, 
"Cannot configure RSS, command %d pending\n", adapter->current_op); return -EBUSY; } @@ -1454,7 +1459,11 @@ static int i40evf_init_rss(struct i40evf_adapter *adapter) int ret; /* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */ - hena = I40E_DEFAULT_RSS_HENA; + if (adapter->vf_res->vf_offload_flags & + I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2) + hena = I40E_DEFAULT_RSS_HENA_EXPANDED; + else + hena = I40E_DEFAULT_RSS_HENA; wr32(hw, I40E_VFQF_HENA(0), (u32)hena); wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32)); @@ -1829,6 +1838,7 @@ static void i40evf_reset_task(struct work_struct *work) break; msleep(I40EVF_RESET_WAIT_MS); } + pci_set_master(adapter->pdev); /* extra wait to make sure minimum wait is met */ msleep(I40EVF_RESET_WAIT_MS); if (i == I40EVF_RESET_WAIT_COUNT) { @@ -1873,6 +1883,7 @@ static void i40evf_reset_task(struct work_struct *work) adapter->netdev->flags &= ~IFF_UP; clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); adapter->flags &= ~I40EVF_FLAG_RESET_PENDING; + adapter->state = __I40EVF_DOWN; dev_info(&adapter->pdev->dev, "Reset task did not complete, VF disabled\n"); return; /* Do not attempt to reinit. It's dead, Jim. */ } @@ -2142,7 +2153,8 @@ static int i40evf_open(struct net_device *netdev) dev_err(&adapter->pdev->dev, "Unable to open device due to PF driver failure.\n"); return -EIO; } - if (adapter->state != __I40EVF_DOWN || adapter->aq_required) + + if (adapter->state != __I40EVF_DOWN) return -EBUSY; /* allocate transmit descriptors */ @@ -2197,14 +2209,14 @@ static int i40evf_close(struct net_device *netdev) { struct i40evf_adapter *adapter = netdev_priv(netdev); - if (adapter->state <= __I40EVF_DOWN) + if (adapter->state <= __I40EVF_DOWN_PENDING) return 0; set_bit(__I40E_DOWN, &adapter->vsi.state); i40evf_down(adapter); - adapter->state = __I40EVF_DOWN; + adapter->state = __I40EVF_DOWN_PENDING; i40evf_free_traffic_irqs(adapter); return 0; @@ -2325,9 +2337,24 @@ int i40evf_process_config(struct i40evf_adapter *adapter) NETIF_F_IPV6_CSUM | NETIF_F_TSO | NETIF_F_TSO6 | + NETIF_F_TSO_ECN | + NETIF_F_GSO_GRE | + NETIF_F_GSO_UDP_TUNNEL | NETIF_F_RXCSUM | NETIF_F_GRO; + netdev->hw_enc_features |= NETIF_F_IP_CSUM | + NETIF_F_IPV6_CSUM | + NETIF_F_TSO | + NETIF_F_TSO6 | + NETIF_F_TSO_ECN | + NETIF_F_GSO_GRE | + NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_GSO_UDP_TUNNEL_CSUM; + + if (adapter->flags & I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE) + netdev->features |= NETIF_F_GSO_UDP_TUNNEL_CSUM; + /* copy netdev features into list of user selectable features */ netdev->hw_features |= netdev->features; netdev->hw_features &= ~NETIF_F_RXCSUM; @@ -2466,11 +2493,20 @@ static void i40evf_init_task(struct work_struct *work) default: goto err_alloc; } + + if (hw->mac.type == I40E_MAC_X722_VF) + adapter->flags |= I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE; + if (i40evf_process_config(adapter)) goto err_alloc; adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN; adapter->flags |= I40EVF_FLAG_RX_CSUM_ENABLED; + adapter->flags |= I40EVF_FLAG_RX_1BUF_CAPABLE; + adapter->flags |= I40EVF_FLAG_RX_PS_CAPABLE; + + /* Default to single buffer rx, can be changed through ethtool. 
*/ + adapter->flags &= ~I40EVF_FLAG_RX_PS_ENABLED; netdev->netdev_ops = &i40evf_netdev_ops; i40evf_set_ethtool_ops(netdev); @@ -2502,10 +2538,9 @@ static void i40evf_init_task(struct work_struct *work) goto err_sw_init; i40evf_map_rings_to_vectors(adapter); if (adapter->vf_res->vf_offload_flags & - I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) + I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) adapter->flags |= I40EVF_FLAG_WB_ON_ITR_CAPABLE; - if (!RSS_AQ(adapter)) - i40evf_init_rss(adapter); + err = i40evf_request_misc_irq(adapter); if (err) goto err_sw_init; @@ -2885,6 +2920,11 @@ static int __init i40evf_init_module(void) pr_info("%s\n", i40evf_copyright); + i40evf_wq = create_singlethread_workqueue(i40evf_driver_name); + if (!i40evf_wq) { + pr_err("%s: Failed to create workqueue\n", i40evf_driver_name); + return -ENOMEM; + } ret = pci_register_driver(&i40evf_driver); return ret; } @@ -2900,6 +2940,7 @@ module_init(i40evf_init_module); static void __exit i40evf_exit_module(void) { pci_unregister_driver(&i40evf_driver); + destroy_workqueue(i40evf_wq); } module_exit(i40evf_exit_module); diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c index c1c526283757..488e738f76c6 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c @@ -270,6 +270,10 @@ void i40evf_configure_queues(struct i40evf_adapter *adapter) vqpi->rxq.max_pkt_size = adapter->netdev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN; vqpi->rxq.databuffer_size = adapter->rx_rings[i].rx_buf_len; + if (adapter->flags & I40EVF_FLAG_RX_PS_ENABLED) { + vqpi->rxq.splithdr_enabled = true; + vqpi->rxq.hdr_size = I40E_RX_HDR_SIZE; + } vqpi++; } @@ -804,6 +808,8 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, case I40E_VIRTCHNL_OP_DISABLE_QUEUES: i40evf_free_all_tx_resources(adapter); i40evf_free_all_rx_resources(adapter); + if (adapter->state == __I40EVF_DOWN_PENDING) + adapter->state = __I40EVF_DOWN; break; case I40E_VIRTCHNL_OP_VERSION: case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP: diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c index adb33e2a0137..9a1a9c7b0748 100644 --- a/drivers/net/ethernet/intel/igb/e1000_82575.c +++ b/drivers/net/ethernet/intel/igb/e1000_82575.c @@ -34,6 +34,7 @@ #include "e1000_mac.h" #include "e1000_82575.h" #include "e1000_i210.h" +#include "igb.h" static s32 igb_get_invariants_82575(struct e1000_hw *); static s32 igb_acquire_phy_82575(struct e1000_hw *); @@ -71,6 +72,32 @@ static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw); static const u16 e1000_82580_rxpbs_table[] = { 36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140 }; +/* Due to a hw errata, if the host tries to configure the VFTA register + * while performing queries from the BMC or DMA, then the VFTA in some + * cases won't be written. + */ + +/** + * igb_write_vfta_i350 - Write value to VLAN filter table + * @hw: pointer to the HW structure + * @offset: register offset in VLAN filter table + * @value: register value written to VLAN filter table + * + * Writes value at the given offset in the register array which stores + * the VLAN filter table. 
+ **/ +static void igb_write_vfta_i350(struct e1000_hw *hw, u32 offset, u32 value) +{ + struct igb_adapter *adapter = hw->back; + int i; + + for (i = 10; i--;) + array_wr32(E1000_VFTA, offset, value); + + wrfl(); + adapter->shadow_vfta[offset] = value; +} + /** * igb_sgmii_uses_mdio_82575 - Determine if I2C pins are for external MDIO * @hw: pointer to the HW structure @@ -398,6 +425,8 @@ static s32 igb_init_mac_params_82575(struct e1000_hw *hw) /* Set mta register count */ mac->mta_reg_count = 128; + /* Set uta register count */ + mac->uta_reg_count = (hw->mac.type == e1000_82575) ? 0 : 128; /* Set rar entry count */ switch (mac->type) { case e1000_82576: @@ -429,6 +458,11 @@ static s32 igb_init_mac_params_82575(struct e1000_hw *hw) mac->ops.release_swfw_sync = igb_release_swfw_sync_82575; } + if ((hw->mac.type == e1000_i350) || (hw->mac.type == e1000_i354)) + mac->ops.write_vfta = igb_write_vfta_i350; + else + mac->ops.write_vfta = igb_write_vfta; + /* Set if part includes ASF firmware */ mac->asf_firmware_present = true; /* Set if manageability features are enabled. */ @@ -1517,10 +1551,7 @@ static s32 igb_init_hw_82575(struct e1000_hw *hw) /* Disabling VLAN filtering */ hw_dbg("Initializing the IEEE VLAN\n"); - if ((hw->mac.type == e1000_i350) || (hw->mac.type == e1000_i354)) - igb_clear_vfta_i350(hw); - else - igb_clear_vfta(hw); + igb_clear_vfta(hw); /* Setup the receive address */ igb_init_rx_addrs(hw, rar_count); diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h index c3c598c347a9..e9f23ee8f15e 100644 --- a/drivers/net/ethernet/intel/igb/e1000_defines.h +++ b/drivers/net/ethernet/intel/igb/e1000_defines.h @@ -356,7 +356,8 @@ /* Ethertype field values */ #define ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.3ac packet */ -#define MAX_JUMBO_FRAME_SIZE 0x3F00 +/* As per the EAS the maximum supported size is 9.5KB (9728 bytes) */ +#define MAX_JUMBO_FRAME_SIZE 0x2600 /* PBA constants */ #define E1000_PBA_34K 0x0022 diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h index 4034207eb5cc..f0c416e21d2c 100644 --- a/drivers/net/ethernet/intel/igb/e1000_hw.h +++ b/drivers/net/ethernet/intel/igb/e1000_hw.h @@ -325,7 +325,7 @@ struct e1000_mac_operations { s32 (*get_thermal_sensor_data)(struct e1000_hw *); s32 (*init_thermal_sensor_thresh)(struct e1000_hw *); #endif - + void (*write_vfta)(struct e1000_hw *, u32, u32); }; struct e1000_phy_operations { diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c index 2a88595f956c..07cf4fe58338 100644 --- a/drivers/net/ethernet/intel/igb/e1000_mac.c +++ b/drivers/net/ethernet/intel/igb/e1000_mac.c @@ -92,10 +92,8 @@ void igb_clear_vfta(struct e1000_hw *hw) { u32 offset; - for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) { - array_wr32(E1000_VFTA, offset, 0); - wrfl(); - } + for (offset = E1000_VLAN_FILTER_TBL_SIZE; offset--;) + hw->mac.ops.write_vfta(hw, offset, 0); } /** @@ -107,54 +105,14 @@ void igb_clear_vfta(struct e1000_hw *hw) * Writes value at the given offset in the register array which stores * the VLAN filter table. 
**/ -static void igb_write_vfta(struct e1000_hw *hw, u32 offset, u32 value) +void igb_write_vfta(struct e1000_hw *hw, u32 offset, u32 value) { + struct igb_adapter *adapter = hw->back; + array_wr32(E1000_VFTA, offset, value); wrfl(); -} - -/* Due to a hw errata, if the host tries to configure the VFTA register - * while performing queries from the BMC or DMA, then the VFTA in some - * cases won't be written. - */ -/** - * igb_clear_vfta_i350 - Clear VLAN filter table - * @hw: pointer to the HW structure - * - * Clears the register array which contains the VLAN filter table by - * setting all the values to 0. - **/ -void igb_clear_vfta_i350(struct e1000_hw *hw) -{ - u32 offset; - int i; - - for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) { - for (i = 0; i < 10; i++) - array_wr32(E1000_VFTA, offset, 0); - - wrfl(); - } -} - -/** - * igb_write_vfta_i350 - Write value to VLAN filter table - * @hw: pointer to the HW structure - * @offset: register offset in VLAN filter table - * @value: register value written to VLAN filter table - * - * Writes value at the given offset in the register array which stores - * the VLAN filter table. - **/ -static void igb_write_vfta_i350(struct e1000_hw *hw, u32 offset, u32 value) -{ - int i; - - for (i = 0; i < 10; i++) - array_wr32(E1000_VFTA, offset, value); - - wrfl(); + adapter->shadow_vfta[offset] = value; } /** @@ -183,40 +141,155 @@ void igb_init_rx_addrs(struct e1000_hw *hw, u16 rar_count) } /** + * igb_find_vlvf_slot - find the VLAN id or the first empty slot + * @hw: pointer to hardware structure + * @vlan: VLAN id to write to VLAN filter + * @vlvf_bypass: skip VLVF if no match is found + * + * return the VLVF index where this VLAN id should be placed + * + **/ +static s32 igb_find_vlvf_slot(struct e1000_hw *hw, u32 vlan, bool vlvf_bypass) +{ + s32 regindex, first_empty_slot; + u32 bits; + + /* short cut the special case */ + if (vlan == 0) + return 0; + + /* if vlvf_bypass is set we don't want to use an empty slot, we + * will simply bypass the VLVF if there are no entries present in the + * VLVF that contain our VLAN + */ + first_empty_slot = vlvf_bypass ? -E1000_ERR_NO_SPACE : 0; + + /* Search for the VLAN id in the VLVF entries. Save off the first empty + * slot found along the way. + * + * pre-decrement loop covering (IXGBE_VLVF_ENTRIES - 1) .. 1 + */ + for (regindex = E1000_VLVF_ARRAY_SIZE; --regindex > 0;) { + bits = rd32(E1000_VLVF(regindex)) & E1000_VLVF_VLANID_MASK; + if (bits == vlan) + return regindex; + if (!first_empty_slot && !bits) + first_empty_slot = regindex; + } + + return first_empty_slot ? 
: -E1000_ERR_NO_SPACE; +} + +/** * igb_vfta_set - enable or disable vlan in VLAN filter table * @hw: pointer to the HW structure - * @vid: VLAN id to add or remove - * @add: if true add filter, if false remove + * @vlan: VLAN id to add or remove + * @vind: VMDq output index that maps queue to VLAN id + * @vlan_on: if true add filter, if false remove * * Sets or clears a bit in the VLAN filter table array based on VLAN id * and if we are adding or removing the filter **/ -s32 igb_vfta_set(struct e1000_hw *hw, u32 vid, bool add) +s32 igb_vfta_set(struct e1000_hw *hw, u32 vlan, u32 vind, + bool vlan_on, bool vlvf_bypass) { - u32 index = (vid >> E1000_VFTA_ENTRY_SHIFT) & E1000_VFTA_ENTRY_MASK; - u32 mask = 1 << (vid & E1000_VFTA_ENTRY_BIT_SHIFT_MASK); - u32 vfta; struct igb_adapter *adapter = hw->back; - s32 ret_val = 0; + u32 regidx, vfta_delta, vfta, bits; + s32 vlvf_index; - vfta = adapter->shadow_vfta[index]; + if ((vlan > 4095) || (vind > 7)) + return -E1000_ERR_PARAM; - /* bit was set/cleared before we started */ - if ((!!(vfta & mask)) == add) { - ret_val = -E1000_ERR_CONFIG; - } else { - if (add) - vfta |= mask; - else - vfta &= ~mask; + /* this is a 2 part operation - first the VFTA, then the + * VLVF and VLVFB if VT Mode is set + * We don't write the VFTA until we know the VLVF part succeeded. + */ + + /* Part 1 + * The VFTA is a bitstring made up of 128 32-bit registers + * that enable the particular VLAN id, much like the MTA: + * bits[11-5]: which register + * bits[4-0]: which bit in the register + */ + regidx = vlan / 32; + vfta_delta = 1 << (vlan % 32); + vfta = adapter->shadow_vfta[regidx]; + + /* vfta_delta represents the difference between the current value + * of vfta and the value we want in the register. Since the diff + * is an XOR mask we can just update vfta using an XOR. + */ + vfta_delta &= vlan_on ? ~vfta : vfta; + vfta ^= vfta_delta; + + /* Part 2 + * If VT Mode is set + * Either vlan_on + * make sure the VLAN is in VLVF + * set the vind bit in the matching VLVFB + * Or !vlan_on + * clear the pool bit and possibly the vind + */ + if (!adapter->vfs_allocated_count) + goto vfta_update; + + vlvf_index = igb_find_vlvf_slot(hw, vlan, vlvf_bypass); + if (vlvf_index < 0) { + if (vlvf_bypass) + goto vfta_update; + return vlvf_index; } - if ((hw->mac.type == e1000_i350) || (hw->mac.type == e1000_i354)) - igb_write_vfta_i350(hw, index, vfta); - else - igb_write_vfta(hw, index, vfta); - adapter->shadow_vfta[index] = vfta; - return ret_val; + bits = rd32(E1000_VLVF(vlvf_index)); + + /* set the pool bit */ + bits |= 1 << (E1000_VLVF_POOLSEL_SHIFT + vind); + if (vlan_on) + goto vlvf_update; + + /* clear the pool bit */ + bits ^= 1 << (E1000_VLVF_POOLSEL_SHIFT + vind); + + if (!(bits & E1000_VLVF_POOLSEL_MASK)) { + /* Clear VFTA first, then disable VLVF. Otherwise + * we run the risk of stray packets leaking into + * the PF via the default pool + */ + if (vfta_delta) + hw->mac.ops.write_vfta(hw, regidx, vfta); + + /* disable VLVF and clear remaining bit from pool */ + wr32(E1000_VLVF(vlvf_index), 0); + + return 0; + } + + /* If there are still bits set in the VLVFB registers + * for the VLAN ID indicated we need to see if the + * caller is requesting that we clear the VFTA entry bit. + * If the caller has requested that we clear the VFTA + * entry bit but there are still pools/VFs using this VLAN + * ID entry then ignore the request. 
We're not worried + * about the case where we're turning the VFTA VLAN ID + * entry bit on, only when requested to turn it off as + * there may be multiple pools and/or VFs using the + * VLAN ID entry. In that case we cannot clear the + * VFTA bit until all pools/VFs using that VLAN ID have also + * been cleared. This will be indicated by "bits" being + * zero. + */ + vfta_delta = 0; + +vlvf_update: + /* record pool change and enable VLAN ID if not already enabled */ + wr32(E1000_VLVF(vlvf_index), bits | vlan | E1000_VLVF_VLANID_ENABLE); + +vfta_update: + /* bit was set/cleared before we started */ + if (vfta_delta) + hw->mac.ops.write_vfta(hw, regidx, vfta); + + return 0; } /** diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.h b/drivers/net/ethernet/intel/igb/e1000_mac.h index ea24961b0d70..90c8893c3eed 100644 --- a/drivers/net/ethernet/intel/igb/e1000_mac.h +++ b/drivers/net/ethernet/intel/igb/e1000_mac.h @@ -56,8 +56,9 @@ s32 igb_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg, void igb_clear_hw_cntrs_base(struct e1000_hw *hw); void igb_clear_vfta(struct e1000_hw *hw); -void igb_clear_vfta_i350(struct e1000_hw *hw); -s32 igb_vfta_set(struct e1000_hw *hw, u32 vid, bool add); +void igb_write_vfta(struct e1000_hw *hw, u32 offset, u32 value); +s32 igb_vfta_set(struct e1000_hw *hw, u32 vid, u32 vind, + bool vlan_on, bool vlvf_bypass); void igb_config_collision_dist(struct e1000_hw *hw); void igb_init_rx_addrs(struct e1000_hw *hw, u16 rar_count); void igb_mta_set(struct e1000_hw *hw, u32 hash_value); diff --git a/drivers/net/ethernet/intel/igb/e1000_mbx.c b/drivers/net/ethernet/intel/igb/e1000_mbx.c index 162cc49345d0..10f5c9e016a9 100644 --- a/drivers/net/ethernet/intel/igb/e1000_mbx.c +++ b/drivers/net/ethernet/intel/igb/e1000_mbx.c @@ -322,14 +322,20 @@ static s32 igb_obtain_mbx_lock_pf(struct e1000_hw *hw, u16 vf_number) { s32 ret_val = -E1000_ERR_MBX; u32 p2v_mailbox; + int count = 10; - /* Take ownership of the buffer */ - wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_PFU); + do { + /* Take ownership of the buffer */ + wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_PFU); - /* reserve mailbox for vf use */ - p2v_mailbox = rd32(E1000_P2VMAILBOX(vf_number)); - if (p2v_mailbox & E1000_P2VMAILBOX_PFU) - ret_val = 0; + /* reserve mailbox for vf use */ + p2v_mailbox = rd32(E1000_P2VMAILBOX(vf_number)); + if (p2v_mailbox & E1000_P2VMAILBOX_PFU) { + ret_val = 0; + break; + } + udelay(1000); + } while (count-- > 0); return ret_val; } diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h index e3cb93bdb21a..707ae5c297ea 100644 --- a/drivers/net/ethernet/intel/igb/igb.h +++ b/drivers/net/ethernet/intel/igb/igb.h @@ -95,7 +95,6 @@ struct vf_data_storage { unsigned char vf_mac_addresses[ETH_ALEN]; u16 vf_mc_hashes[IGB_MAX_VF_MC_ENTRIES]; u16 num_vf_mc_hashes; - u16 vlans_enabled; u32 flags; unsigned long last_nack; u16 pf_vlan; /* When set, guest VLAN config not allowed. 
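The Part 1 comment in the new igb_vfta_set() describes the VFTA as a bitstring spread across 128 32-bit registers, with vfta_delta acting as an XOR mask that is non-zero only when the stored bit actually has to change. A minimal user-space model of just that update step, using an in-memory shadow table in place of the real registers:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Toy model of the VFTA update: one bit per VLAN id, 32 ids per word. */
static uint32_t shadow_vfta[128];

static void vfta_update(uint16_t vlan, bool vlan_on)
{
	uint32_t regidx = vlan / 32;             /* bits[11-5]: register */
	uint32_t vfta_delta = 1u << (vlan % 32); /* bits[4-0]: bit */
	uint32_t vfta = shadow_vfta[regidx];

	/* keep the delta only if the bit differs from the wanted state */
	vfta_delta &= vlan_on ? ~vfta : vfta;
	vfta ^= vfta_delta;

	if (vfta_delta) {	/* write_vfta() in the driver */
		shadow_vfta[regidx] = vfta;
		printf("write VFTA[%u] = 0x%08x\n", regidx, vfta);
	}
}

int main(void)
{
	vfta_update(100, true);   /* sets bit 4 of register 3 */
	vfta_update(100, true);   /* no-op: bit already set */
	vfta_update(100, false);  /* clears it again */
	return 0;
}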
*/ @@ -482,6 +481,7 @@ struct igb_adapter { #define IGB_FLAG_MAS_ENABLE (1 << 12) #define IGB_FLAG_HAS_MSIX (1 << 13) #define IGB_FLAG_EEE (1 << 14) +#define IGB_FLAG_VLAN_PROMISC BIT(15) /* Media Auto Sense */ #define IGB_MAS_ENABLE_0 0X0001 diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 31e5f3942839..af46fcf8a50e 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -140,7 +140,7 @@ static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats); static int igb_change_mtu(struct net_device *, int); static int igb_set_mac(struct net_device *, void *); -static void igb_set_uta(struct igb_adapter *adapter); +static void igb_set_uta(struct igb_adapter *adapter, bool set); static irqreturn_t igb_intr(int irq, void *); static irqreturn_t igb_intr_msi(int irq, void *); static irqreturn_t igb_msix_other(int irq, void *); @@ -1534,12 +1534,13 @@ static void igb_irq_enable(struct igb_adapter *adapter) static void igb_update_mng_vlan(struct igb_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; + u16 pf_id = adapter->vfs_allocated_count; u16 vid = adapter->hw.mng_cookie.vlan_id; u16 old_vid = adapter->mng_vlan_id; if (hw->mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) { /* add VID to filter table */ - igb_vfta_set(hw, vid, true); + igb_vfta_set(hw, vid, pf_id, true, true); adapter->mng_vlan_id = vid; } else { adapter->mng_vlan_id = IGB_MNG_VLAN_NONE; @@ -1549,7 +1550,7 @@ static void igb_update_mng_vlan(struct igb_adapter *adapter) (vid != old_vid) && !test_bit(old_vid, adapter->active_vlans)) { /* remove VID from filter table */ - igb_vfta_set(hw, old_vid, false); + igb_vfta_set(hw, vid, pf_id, false, true); } } @@ -1818,6 +1819,10 @@ void igb_down(struct igb_adapter *adapter) if (!pci_channel_offline(adapter->pdev)) igb_reset(adapter); + + /* clear VLAN promisc flag so VFTA will be updated if necessary */ + adapter->flags &= ~IGB_FLAG_VLAN_PROMISC; + igb_clean_all_tx_rings(adapter); igb_clean_all_rx_rings(adapter); #ifdef CONFIG_IGB_DCA @@ -1862,7 +1867,7 @@ void igb_reset(struct igb_adapter *adapter) struct e1000_hw *hw = &adapter->hw; struct e1000_mac_info *mac = &hw->mac; struct e1000_fc_info *fc = &hw->fc; - u32 pba = 0, tx_space, min_tx_space, min_rx_space, hwm; + u32 pba, hwm; /* Repartition Pba for greater than 9k mtu * To take effect CTRL.RST is required. @@ -1886,9 +1891,10 @@ void igb_reset(struct igb_adapter *adapter) break; } - if ((adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) && - (mac->type < e1000_82576)) { - /* adjust PBA for jumbo frames */ + if (mac->type == e1000_82575) { + u32 min_rx_space, min_tx_space, needed_tx_space; + + /* write Rx PBA so that hardware can report correct Tx PBA */ wr32(E1000_PBA, pba); /* To maintain wire speed transmits, the Tx FIFO should be @@ -1898,31 +1904,26 @@ void igb_reset(struct igb_adapter *adapter) * one full receive packet and is similarly rounded up and * expressed in KB. 
*/ - pba = rd32(E1000_PBA); - /* upper 16 bits has Tx packet buffer allocation size in KB */ - tx_space = pba >> 16; - /* lower 16 bits has Rx packet buffer allocation size in KB */ - pba &= 0xffff; - /* the Tx fifo also stores 16 bytes of information about the Tx - * but don't include ethernet FCS because hardware appends it + min_rx_space = DIV_ROUND_UP(MAX_JUMBO_FRAME_SIZE, 1024); + + /* The Tx FIFO also stores 16 bytes of information about the Tx + * but don't include Ethernet FCS because hardware appends it. + * We only need to round down to the nearest 512 byte block + * count since the value we care about is 2 frames, not 1. */ - min_tx_space = (adapter->max_frame_size + - sizeof(union e1000_adv_tx_desc) - - ETH_FCS_LEN) * 2; - min_tx_space = ALIGN(min_tx_space, 1024); - min_tx_space >>= 10; - /* software strips receive CRC, so leave room for it */ - min_rx_space = adapter->max_frame_size; - min_rx_space = ALIGN(min_rx_space, 1024); - min_rx_space >>= 10; + min_tx_space = adapter->max_frame_size; + min_tx_space += sizeof(union e1000_adv_tx_desc) - ETH_FCS_LEN; + min_tx_space = DIV_ROUND_UP(min_tx_space, 512); + + /* upper 16 bits has Tx packet buffer allocation size in KB */ + needed_tx_space = min_tx_space - (rd32(E1000_PBA) >> 16); /* If current Tx allocation is less than the min Tx FIFO size, * and the min Tx FIFO size is less than the current Rx FIFO - * allocation, take space away from current Rx allocation + * allocation, take space away from current Rx allocation. */ - if (tx_space < min_tx_space && - ((min_tx_space - tx_space) < pba)) { - pba = pba - (min_tx_space - tx_space); + if (needed_tx_space < pba) { + pba -= needed_tx_space; /* if short on Rx space, Rx wins and must trump Tx * adjustment @@ -1930,18 +1931,20 @@ void igb_reset(struct igb_adapter *adapter) if (pba < min_rx_space) pba = min_rx_space; } + + /* adjust PBA for jumbo frames */ wr32(E1000_PBA, pba); } - /* flow control settings */ - /* The high water mark must be low enough to fit one full frame - * (or the size used for early receive) above it in the Rx FIFO. - * Set it to the lower of: - * - 90% of the Rx FIFO size, or - * - the full Rx FIFO size minus one full frame + /* flow control settings + * The high water mark must be low enough to fit one full frame + * after transmitting the pause frame. As such we must have enough + * space to allow for us to complete our current transmit and then + * receive the frame that is in progress from the link partner. 
+ * Set it to: + * - the full Rx FIFO size minus one full Tx plus one full Rx frame */ - hwm = min(((pba << 10) * 9 / 10), - ((pba << 10) - 2 * adapter->max_frame_size)); + hwm = (pba << 10) - (adapter->max_frame_size + MAX_JUMBO_FRAME_SIZE); fc->high_water = hwm & 0xFFFFFFF0; /* 16-byte granularity */ fc->low_water = fc->high_water - 16; @@ -2051,7 +2054,7 @@ static int igb_set_features(struct net_device *netdev, if (changed & NETIF_F_HW_VLAN_CTAG_RX) igb_vlan_mode(netdev, features); - if (!(changed & NETIF_F_RXALL)) + if (!(changed & (NETIF_F_RXALL | NETIF_F_NTUPLE))) return 0; netdev->features = features; @@ -2064,6 +2067,25 @@ static int igb_set_features(struct net_device *netdev, return 0; } +static int igb_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], + struct net_device *dev, + const unsigned char *addr, u16 vid, + u16 flags) +{ + /* guarantee we can provide a unique filter for the unicast address */ + if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) { + struct igb_adapter *adapter = netdev_priv(dev); + struct e1000_hw *hw = &adapter->hw; + int vfn = adapter->vfs_allocated_count; + int rar_entries = hw->mac.rar_entry_count - (vfn + 1); + + if (netdev_uc_count(dev) >= rar_entries) + return -ENOMEM; + } + + return ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, flags); +} + static const struct net_device_ops igb_netdev_ops = { .ndo_open = igb_open, .ndo_stop = igb_close, @@ -2087,6 +2109,7 @@ static const struct net_device_ops igb_netdev_ops = { #endif .ndo_fix_features = igb_fix_features, .ndo_set_features = igb_set_features, + .ndo_fdb_add = igb_ndo_fdb_add, .ndo_features_check = passthru_features_check, }; @@ -2921,14 +2944,6 @@ void igb_set_flag_queue_pairs(struct igb_adapter *adapter, /* Device supports enough interrupts without queue pairing. */ break; case e1000_82576: - /* If VFs are going to be allocated with RSS queues then we - * should pair the queues in order to conserve interrupts due - * to limited supply. - */ - if ((adapter->rss_queues > 1) && - (adapter->vfs_allocated_count > 6)) - adapter->flags |= IGB_FLAG_QUEUE_PAIRS; - /* fall through */ case e1000_82580: case e1000_i350: case e1000_i354: @@ -2939,6 +2954,8 @@ void igb_set_flag_queue_pairs(struct igb_adapter *adapter, */ if (adapter->rss_queues > (max_rss_queues / 2)) adapter->flags |= IGB_FLAG_QUEUE_PAIRS; + else + adapter->flags &= ~IGB_FLAG_QUEUE_PAIRS; break; } } @@ -3498,7 +3515,7 @@ void igb_setup_rctl(struct igb_adapter *adapter) /* disable store bad packets and clear size bits. */ rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256); - /* enable LPE to prevent packets larger than max_frame_size */ + /* enable LPE to allow for reception of jumbo frames */ rctl |= E1000_RCTL_LPE; /* disable queue 0 to prevent tail write w/o re-config */ @@ -3522,8 +3539,7 @@ void igb_setup_rctl(struct igb_adapter *adapter) E1000_RCTL_BAM | /* RX All Bcast Pkts */ E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */ - rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */ - E1000_RCTL_DPF | /* Allow filtered pause */ + rctl &= ~(E1000_RCTL_DPF | /* Allow filtered pause */ E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */ /* Do not mess with E1000_CTRL_VME, it affects transmit as well, * and that breaks VLANs. 
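The reworked watermark computation in igb_reset() above replaces the old 90%-of-FIFO heuristic with room for one in-flight Tx frame plus one full Rx frame after a pause frame is sent. A small sketch of the arithmetic, assuming a 34 KB Rx packet buffer (E1000_PBA_34K) and a sample 1522-byte max frame:

#include <stdint.h>
#include <stdio.h>

#define MAX_JUMBO_FRAME_SIZE 0x2600	/* 9728 bytes, per the new define */

int main(void)
{
	uint32_t pba = 34;		/* Rx packet buffer size in KB */
	uint32_t max_frame_size = 1522;	/* sample standard MTU frame */
	uint32_t hwm, high_water, low_water;

	/* leave room for one in-flight Tx frame plus one full Rx frame */
	hwm = (pba << 10) - (max_frame_size + MAX_JUMBO_FRAME_SIZE);

	high_water = hwm & 0xFFFFFFF0;	/* 16-byte granularity */
	low_water = high_water - 16;

	printf("high_water=%u low_water=%u\n", high_water, low_water);
	return 0;
}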
@@ -3539,12 +3555,8 @@ static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size, struct e1000_hw *hw = &adapter->hw; u32 vmolr; - /* if it isn't the PF check to see if VFs are enabled and - * increase the size to support vlan tags - */ - if (vfn < adapter->vfs_allocated_count && - adapter->vf_data[vfn].vlans_enabled) - size += VLAN_TAG_SIZE; + if (size > MAX_JUMBO_FRAME_SIZE) + size = MAX_JUMBO_FRAME_SIZE; vmolr = rd32(E1000_VMOLR(vfn)); vmolr &= ~E1000_VMOLR_RLPML_MASK; @@ -3554,32 +3566,6 @@ static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size, return 0; } -/** - * igb_rlpml_set - set maximum receive packet size - * @adapter: board private structure - * - * Configure maximum receivable packet size. - **/ -static void igb_rlpml_set(struct igb_adapter *adapter) -{ - u32 max_frame_size = adapter->max_frame_size; - struct e1000_hw *hw = &adapter->hw; - u16 pf_id = adapter->vfs_allocated_count; - - if (pf_id) { - igb_set_vf_rlpml(adapter, max_frame_size, pf_id); - /* If we're in VMDQ or SR-IOV mode, then set global RLPML - * to our max jumbo frame size, in case we need to enable - * jumbo frames on one of the rings later. - * This will not pass over-length frames into the default - * queue because it's gated by the VMOLR.RLPML. - */ - max_frame_size = MAX_JUMBO_FRAME_SIZE; - } - - wr32(E1000_RLPML, max_frame_size); -} - static inline void igb_set_vmolr(struct igb_adapter *adapter, int vfn, bool aupe) { @@ -3684,9 +3670,6 @@ static void igb_configure_rx(struct igb_adapter *adapter) { int i; - /* set UTA to appropriate mode */ - igb_set_uta(adapter); - /* set the correct pool for the PF default MAC address in entry 0 */ igb_rar_set_qsel(adapter, adapter->hw.mac.addr, 0, adapter->vfs_allocated_count); @@ -4004,6 +3987,130 @@ static int igb_write_uc_addr_list(struct net_device *netdev) return count; } +static int igb_vlan_promisc_enable(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 i, pf_id; + + switch (hw->mac.type) { + case e1000_i210: + case e1000_i211: + case e1000_i350: + /* VLAN filtering needed for VLAN prio filter */ + if (adapter->netdev->features & NETIF_F_NTUPLE) + break; + /* fall through */ + case e1000_82576: + case e1000_82580: + case e1000_i354: + /* VLAN filtering needed for pool filtering */ + if (adapter->vfs_allocated_count) + break; + /* fall through */ + default: + return 1; + } + + /* We are already in VLAN promisc, nothing to do */ + if (adapter->flags & IGB_FLAG_VLAN_PROMISC) + return 0; + + if (!adapter->vfs_allocated_count) + goto set_vfta; + + /* Add PF to all active pools */ + pf_id = adapter->vfs_allocated_count + E1000_VLVF_POOLSEL_SHIFT; + + for (i = E1000_VLVF_ARRAY_SIZE; --i;) { + u32 vlvf = rd32(E1000_VLVF(i)); + + vlvf |= 1 << pf_id; + wr32(E1000_VLVF(i), vlvf); + } + +set_vfta: + /* Set all bits in the VLAN filter table array */ + for (i = E1000_VLAN_FILTER_TBL_SIZE; i--;) + hw->mac.ops.write_vfta(hw, i, ~0U); + + /* Set flag so we don't redo unnecessary work */ + adapter->flags |= IGB_FLAG_VLAN_PROMISC; + + return 0; +} + +#define VFTA_BLOCK_SIZE 8 +static void igb_scrub_vfta(struct igb_adapter *adapter, u32 vfta_offset) +{ + struct e1000_hw *hw = &adapter->hw; + u32 vfta[VFTA_BLOCK_SIZE] = { 0 }; + u32 vid_start = vfta_offset * 32; + u32 vid_end = vid_start + (VFTA_BLOCK_SIZE * 32); + u32 i, vid, word, bits, pf_id; + + /* guarantee that we don't scrub out management VLAN */ + vid = adapter->mng_vlan_id; + if (vid >= vid_start && vid < vid_end) + vfta[(vid - vid_start) / 32] |= 1 << (vid % 32); 
+ + if (!adapter->vfs_allocated_count) + goto set_vfta; + + pf_id = adapter->vfs_allocated_count + E1000_VLVF_POOLSEL_SHIFT; + + for (i = E1000_VLVF_ARRAY_SIZE; --i;) { + u32 vlvf = rd32(E1000_VLVF(i)); + + /* pull VLAN ID from VLVF */ + vid = vlvf & VLAN_VID_MASK; + + /* only concern ourselves with a certain range */ + if (vid < vid_start || vid >= vid_end) + continue; + + if (vlvf & E1000_VLVF_VLANID_ENABLE) { + /* record VLAN ID in VFTA */ + vfta[(vid - vid_start) / 32] |= 1 << (vid % 32); + + /* if PF is part of this then continue */ + if (test_bit(vid, adapter->active_vlans)) + continue; + } + + /* remove PF from the pool */ + bits = ~(1 << pf_id); + bits &= rd32(E1000_VLVF(i)); + wr32(E1000_VLVF(i), bits); + } + +set_vfta: + /* extract values from active_vlans and write back to VFTA */ + for (i = VFTA_BLOCK_SIZE; i--;) { + vid = (vfta_offset + i) * 32; + word = vid / BITS_PER_LONG; + bits = vid % BITS_PER_LONG; + + vfta[i] |= adapter->active_vlans[word] >> bits; + + hw->mac.ops.write_vfta(hw, vfta_offset + i, vfta[i]); + } +} + +static void igb_vlan_promisc_disable(struct igb_adapter *adapter) +{ + u32 i; + + /* We are not in VLAN promisc, nothing to do */ + if (!(adapter->flags & IGB_FLAG_VLAN_PROMISC)) + return; + + /* Set flag so we don't redo unnecessary work */ + adapter->flags &= ~IGB_FLAG_VLAN_PROMISC; + + for (i = 0; i < E1000_VLAN_FILTER_TBL_SIZE; i += VFTA_BLOCK_SIZE) + igb_scrub_vfta(adapter, i); +} + /** * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set * @netdev: network interface device structure @@ -4018,21 +4125,17 @@ static void igb_set_rx_mode(struct net_device *netdev) struct igb_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; unsigned int vfn = adapter->vfs_allocated_count; - u32 rctl, vmolr = 0; + u32 rctl = 0, vmolr = 0; int count; /* Check for Promiscuous and All Multicast modes */ - rctl = rd32(E1000_RCTL); - - /* clear the effected bits */ - rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_VFE); - if (netdev->flags & IFF_PROMISC) { - /* retain VLAN HW filtering if in VT mode */ - if (adapter->vfs_allocated_count) - rctl |= E1000_RCTL_VFE; - rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); - vmolr |= (E1000_VMOLR_ROPE | E1000_VMOLR_MPME); + rctl |= E1000_RCTL_UPE | E1000_RCTL_MPE; + vmolr |= E1000_VMOLR_MPME; + + /* enable use of UTA filter to force packets to default pool */ + if (hw->mac.type == e1000_82576) + vmolr |= E1000_VMOLR_ROPE; } else { if (netdev->flags & IFF_ALLMULTI) { rctl |= E1000_RCTL_MPE; @@ -4050,17 +4153,34 @@ static void igb_set_rx_mode(struct net_device *netdev) vmolr |= E1000_VMOLR_ROMPE; } } - /* Write addresses to available RAR registers, if there is not - * sufficient space to store all the addresses then enable - * unicast promiscuous mode - */ - count = igb_write_uc_addr_list(netdev); - if (count < 0) { - rctl |= E1000_RCTL_UPE; - vmolr |= E1000_VMOLR_ROPE; - } - rctl |= E1000_RCTL_VFE; } + + /* Write addresses to available RAR registers, if there is not + * sufficient space to store all the addresses then enable + * unicast promiscuous mode + */ + count = igb_write_uc_addr_list(netdev); + if (count < 0) { + rctl |= E1000_RCTL_UPE; + vmolr |= E1000_VMOLR_ROPE; + } + + /* enable VLAN filtering by default */ + rctl |= E1000_RCTL_VFE; + + /* disable VLAN filtering for modes that require it */ + if ((netdev->flags & IFF_PROMISC) || + (netdev->features & NETIF_F_RXALL)) { + /* if we fail to set all rules then just clear VFE */ + if (igb_vlan_promisc_enable(adapter)) + rctl &= 
~E1000_RCTL_VFE; + } else { + igb_vlan_promisc_disable(adapter); + } + + /* update state of unicast, multicast, and VLAN filtering modes */ + rctl |= rd32(E1000_RCTL) & ~(E1000_RCTL_UPE | E1000_RCTL_MPE | + E1000_RCTL_VFE); wr32(E1000_RCTL, rctl); /* In order to support SR-IOV and eventually VMDq it is necessary to set @@ -4071,9 +4191,19 @@ static void igb_set_rx_mode(struct net_device *netdev) if ((hw->mac.type < e1000_82576) || (hw->mac.type > e1000_i350)) return; + /* set UTA to appropriate mode */ + igb_set_uta(adapter, !!(vmolr & E1000_VMOLR_ROPE)); + vmolr |= rd32(E1000_VMOLR(vfn)) & ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE); + + /* enable Rx jumbo frames, no need for restriction */ + vmolr &= ~E1000_VMOLR_RLPML_MASK; + vmolr |= MAX_JUMBO_FRAME_SIZE | E1000_VMOLR_LPE; + wr32(E1000_VMOLR(vfn), vmolr); + wr32(E1000_RLPML, MAX_JUMBO_FRAME_SIZE); + igb_restore_vf_multicasts(adapter); } @@ -5088,16 +5218,6 @@ static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, { struct igb_adapter *adapter = netdev_priv(netdev); - if (test_bit(__IGB_DOWN, &adapter->state)) { - dev_kfree_skb_any(skb); - return NETDEV_TX_OK; - } - - if (skb->len <= 0) { - dev_kfree_skb_any(skb); - return NETDEV_TX_OK; - } - /* The minimum packet size with TCTL.PSP set is 17 so pad the skb * in order to meet this minimum size requirement. */ @@ -5792,125 +5912,132 @@ static void igb_restore_vf_multicasts(struct igb_adapter *adapter) static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf) { struct e1000_hw *hw = &adapter->hw; - u32 pool_mask, reg, vid; - int i; + u32 pool_mask, vlvf_mask, i; - pool_mask = 1 << (E1000_VLVF_POOLSEL_SHIFT + vf); + /* create mask for VF and other pools */ + pool_mask = E1000_VLVF_POOLSEL_MASK; + vlvf_mask = 1 << (E1000_VLVF_POOLSEL_SHIFT + vf); + + /* drop PF from pool bits */ + pool_mask &= ~(1 << (E1000_VLVF_POOLSEL_SHIFT + + adapter->vfs_allocated_count)); /* Find the vlan filter for this id */ - for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) { - reg = rd32(E1000_VLVF(i)); + for (i = E1000_VLVF_ARRAY_SIZE; i--;) { + u32 vlvf = rd32(E1000_VLVF(i)); + u32 vfta_mask, vid, vfta; /* remove the vf from the pool */ - reg &= ~pool_mask; - - /* if pool is empty then remove entry from vfta */ - if (!(reg & E1000_VLVF_POOLSEL_MASK) && - (reg & E1000_VLVF_VLANID_ENABLE)) { - reg = 0; - vid = reg & E1000_VLVF_VLANID_MASK; - igb_vfta_set(hw, vid, false); - } + if (!(vlvf & vlvf_mask)) + continue; + + /* clear out bit from VLVF */ + vlvf ^= vlvf_mask; + + /* if other pools are present, just remove ourselves */ + if (vlvf & pool_mask) + goto update_vlvfb; + + /* if PF is present, leave VFTA */ + if (vlvf & E1000_VLVF_POOLSEL_MASK) + goto update_vlvf; + + vid = vlvf & E1000_VLVF_VLANID_MASK; + vfta_mask = 1 << (vid % 32); - wr32(E1000_VLVF(i), reg); + /* clear bit from VFTA */ + vfta = adapter->shadow_vfta[vid / 32]; + if (vfta & vfta_mask) + hw->mac.ops.write_vfta(hw, vid / 32, vfta ^ vfta_mask); +update_vlvf: + /* clear pool selection enable */ + if (adapter->flags & IGB_FLAG_VLAN_PROMISC) + vlvf &= E1000_VLVF_POOLSEL_MASK; + else + vlvf = 0; +update_vlvfb: + /* clear pool bits */ + wr32(E1000_VLVF(i), vlvf); } +} + +static int igb_find_vlvf_entry(struct e1000_hw *hw, u32 vlan) +{ + u32 vlvf; + int idx; - adapter->vf_data[vf].vlans_enabled = 0; + /* short cut the special case */ + if (vlan == 0) + return 0; + + /* Search for the VLAN id in the VLVF entries */ + for (idx = E1000_VLVF_ARRAY_SIZE; --idx;) { + vlvf = rd32(E1000_VLVF(idx)); + if ((vlvf & VLAN_VID_MASK) == vlan) + 
break; + } + + return idx; } -static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf) +void igb_update_pf_vlvf(struct igb_adapter *adapter, u32 vid) { struct e1000_hw *hw = &adapter->hw; - u32 reg, i; - - /* The vlvf table only exists on 82576 hardware and newer */ - if (hw->mac.type < e1000_82576) - return -1; + u32 bits, pf_id; + int idx; - /* we only need to do this if VMDq is enabled */ - if (!adapter->vfs_allocated_count) - return -1; + idx = igb_find_vlvf_entry(hw, vid); + if (!idx) + return; - /* Find the vlan filter for this id */ - for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) { - reg = rd32(E1000_VLVF(i)); - if ((reg & E1000_VLVF_VLANID_ENABLE) && - vid == (reg & E1000_VLVF_VLANID_MASK)) - break; + /* See if any other pools are set for this VLAN filter + * entry other than the PF. + */ + pf_id = adapter->vfs_allocated_count + E1000_VLVF_POOLSEL_SHIFT; + bits = ~(1 << pf_id) & E1000_VLVF_POOLSEL_MASK; + bits &= rd32(E1000_VLVF(idx)); + + /* Disable the filter so this falls into the default pool. */ + if (!bits) { + if (adapter->flags & IGB_FLAG_VLAN_PROMISC) + wr32(E1000_VLVF(idx), 1 << pf_id); + else + wr32(E1000_VLVF(idx), 0); } +} - if (add) { - if (i == E1000_VLVF_ARRAY_SIZE) { - /* Did not find a matching VLAN ID entry that was - * enabled. Search for a free filter entry, i.e. - * one without the enable bit set - */ - for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) { - reg = rd32(E1000_VLVF(i)); - if (!(reg & E1000_VLVF_VLANID_ENABLE)) - break; - } - } - if (i < E1000_VLVF_ARRAY_SIZE) { - /* Found an enabled/available entry */ - reg |= 1 << (E1000_VLVF_POOLSEL_SHIFT + vf); - - /* if !enabled we need to set this up in vfta */ - if (!(reg & E1000_VLVF_VLANID_ENABLE)) { - /* add VID to filter table */ - igb_vfta_set(hw, vid, true); - reg |= E1000_VLVF_VLANID_ENABLE; - } - reg &= ~E1000_VLVF_VLANID_MASK; - reg |= vid; - wr32(E1000_VLVF(i), reg); - - /* do not modify RLPML for PF devices */ - if (vf >= adapter->vfs_allocated_count) - return 0; - - if (!adapter->vf_data[vf].vlans_enabled) { - u32 size; - - reg = rd32(E1000_VMOLR(vf)); - size = reg & E1000_VMOLR_RLPML_MASK; - size += 4; - reg &= ~E1000_VMOLR_RLPML_MASK; - reg |= size; - wr32(E1000_VMOLR(vf), reg); - } +static s32 igb_set_vf_vlan(struct igb_adapter *adapter, u32 vid, + bool add, u32 vf) +{ + int pf_id = adapter->vfs_allocated_count; + struct e1000_hw *hw = &adapter->hw; + int err; - adapter->vf_data[vf].vlans_enabled++; - } - } else { - if (i < E1000_VLVF_ARRAY_SIZE) { - /* remove vf from the pool */ - reg &= ~(1 << (E1000_VLVF_POOLSEL_SHIFT + vf)); - /* if pool is empty then remove entry from vfta */ - if (!(reg & E1000_VLVF_POOLSEL_MASK)) { - reg = 0; - igb_vfta_set(hw, vid, false); - } - wr32(E1000_VLVF(i), reg); - - /* do not modify RLPML for PF devices */ - if (vf >= adapter->vfs_allocated_count) - return 0; - - adapter->vf_data[vf].vlans_enabled--; - if (!adapter->vf_data[vf].vlans_enabled) { - u32 size; - - reg = rd32(E1000_VMOLR(vf)); - size = reg & E1000_VMOLR_RLPML_MASK; - size -= 4; - reg &= ~E1000_VMOLR_RLPML_MASK; - reg |= size; - wr32(E1000_VMOLR(vf), reg); - } - } + /* If VLAN overlaps with one the PF is currently monitoring make + * sure that we are able to allocate a VLVF entry. This may be + * redundant but it guarantees PF will maintain visibility to + * the VLAN. 
+ */ + if (add && test_bit(vid, adapter->active_vlans)) { + err = igb_vfta_set(hw, vid, pf_id, true, false); + if (err) + return err; } - return 0; + + err = igb_vfta_set(hw, vid, vf, add, false); + + if (add && !err) + return err; + + /* If we failed to add the VF VLAN or we are removing the VF VLAN + * we may need to drop the PF pool bit in order to allow us to free + * up the VLVF resources. + */ + if (test_bit(vid, adapter->active_vlans) || + (adapter->flags & IGB_FLAG_VLAN_PROMISC)) + igb_update_pf_vlvf(adapter, vid); + + return err; } static void igb_set_vmvir(struct igb_adapter *adapter, u32 vid, u32 vf) @@ -5923,130 +6050,97 @@ static void igb_set_vmvir(struct igb_adapter *adapter, u32 vid, u32 vf) wr32(E1000_VMVIR(vf), 0); } -static int igb_ndo_set_vf_vlan(struct net_device *netdev, - int vf, u16 vlan, u8 qos) +static int igb_enable_port_vlan(struct igb_adapter *adapter, int vf, + u16 vlan, u8 qos) { - int err = 0; - struct igb_adapter *adapter = netdev_priv(netdev); + int err; - if ((vf >= adapter->vfs_allocated_count) || (vlan > 4095) || (qos > 7)) - return -EINVAL; - if (vlan || qos) { - err = igb_vlvf_set(adapter, vlan, !!vlan, vf); - if (err) - goto out; - igb_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf); - igb_set_vmolr(adapter, vf, !vlan); - adapter->vf_data[vf].pf_vlan = vlan; - adapter->vf_data[vf].pf_qos = qos; - dev_info(&adapter->pdev->dev, - "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf); - if (test_bit(__IGB_DOWN, &adapter->state)) { - dev_warn(&adapter->pdev->dev, - "The VF VLAN has been set, but the PF device is not up.\n"); - dev_warn(&adapter->pdev->dev, - "Bring the PF device up before attempting to use the VF device.\n"); - } - } else { - igb_vlvf_set(adapter, adapter->vf_data[vf].pf_vlan, - false, vf); - igb_set_vmvir(adapter, vlan, vf); - igb_set_vmolr(adapter, vf, true); - adapter->vf_data[vf].pf_vlan = 0; - adapter->vf_data[vf].pf_qos = 0; + err = igb_set_vf_vlan(adapter, vlan, true, vf); + if (err) + return err; + + igb_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf); + igb_set_vmolr(adapter, vf, !vlan); + + /* revoke access to previous VLAN */ + if (vlan != adapter->vf_data[vf].pf_vlan) + igb_set_vf_vlan(adapter, adapter->vf_data[vf].pf_vlan, + false, vf); + + adapter->vf_data[vf].pf_vlan = vlan; + adapter->vf_data[vf].pf_qos = qos; + dev_info(&adapter->pdev->dev, + "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf); + if (test_bit(__IGB_DOWN, &adapter->state)) { + dev_warn(&adapter->pdev->dev, + "The VF VLAN has been set, but the PF device is not up.\n"); + dev_warn(&adapter->pdev->dev, + "Bring the PF device up before attempting to use the VF device.\n"); } -out: + return err; } -static int igb_find_vlvf_entry(struct igb_adapter *adapter, int vid) +static int igb_disable_port_vlan(struct igb_adapter *adapter, int vf) { - struct e1000_hw *hw = &adapter->hw; - int i; - u32 reg; + /* Restore tagless access via VLAN 0 */ + igb_set_vf_vlan(adapter, 0, true, vf); - /* Find the vlan filter for this id */ - for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) { - reg = rd32(E1000_VLVF(i)); - if ((reg & E1000_VLVF_VLANID_ENABLE) && - vid == (reg & E1000_VLVF_VLANID_MASK)) - break; - } + igb_set_vmvir(adapter, 0, vf); + igb_set_vmolr(adapter, vf, true); - if (i >= E1000_VLVF_ARRAY_SIZE) - i = -1; + /* Remove any PF assigned VLAN */ + if (adapter->vf_data[vf].pf_vlan) + igb_set_vf_vlan(adapter, adapter->vf_data[vf].pf_vlan, + false, vf); + + adapter->vf_data[vf].pf_vlan = 0; + adapter->vf_data[vf].pf_qos = 0; - return i; + return 0; } -static 
int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf) +static int igb_ndo_set_vf_vlan(struct net_device *netdev, + int vf, u16 vlan, u8 qos) { - struct e1000_hw *hw = &adapter->hw; - int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT; - int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK); - int err = 0; + struct igb_adapter *adapter = netdev_priv(netdev); - /* If in promiscuous mode we need to make sure the PF also has - * the VLAN filter set. - */ - if (add && (adapter->netdev->flags & IFF_PROMISC)) - err = igb_vlvf_set(adapter, vid, add, - adapter->vfs_allocated_count); - if (err) - goto out; + if ((vf >= adapter->vfs_allocated_count) || (vlan > 4095) || (qos > 7)) + return -EINVAL; - err = igb_vlvf_set(adapter, vid, add, vf); + return (vlan || qos) ? igb_enable_port_vlan(adapter, vf, vlan, qos) : + igb_disable_port_vlan(adapter, vf); +} - if (err) - goto out; +static int igb_set_vf_vlan_msg(struct igb_adapter *adapter, u32 *msgbuf, u32 vf) +{ + int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT; + int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK); - /* Go through all the checks to see if the VLAN filter should - * be wiped completely. - */ - if (!add && (adapter->netdev->flags & IFF_PROMISC)) { - u32 vlvf, bits; - int regndx = igb_find_vlvf_entry(adapter, vid); - - if (regndx < 0) - goto out; - /* See if any other pools are set for this VLAN filter - * entry other than the PF. - */ - vlvf = bits = rd32(E1000_VLVF(regndx)); - bits &= 1 << (E1000_VLVF_POOLSEL_SHIFT + - adapter->vfs_allocated_count); - /* If the filter was removed then ensure PF pool bit - * is cleared if the PF only added itself to the pool - * because the PF is in promiscuous mode. - */ - if ((vlvf & VLAN_VID_MASK) == vid && - !test_bit(vid, adapter->active_vlans) && - !bits) - igb_vlvf_set(adapter, vid, add, - adapter->vfs_allocated_count); - } + if (adapter->vf_data[vf].pf_vlan) + return -1; -out: - return err; + /* VLAN 0 is a special case, don't allow it to be removed */ + if (!vid && !add) + return 0; + + return igb_set_vf_vlan(adapter, vid, !!add, vf); } static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf) { - /* clear flags - except flag that indicates PF has set the MAC */ - adapter->vf_data[vf].flags &= IGB_VF_FLAG_PF_SET_MAC; - adapter->vf_data[vf].last_nack = jiffies; + struct vf_data_storage *vf_data = &adapter->vf_data[vf]; - /* reset offloads to defaults */ - igb_set_vmolr(adapter, vf, true); + /* clear flags - except flag that indicates PF has set the MAC */ + vf_data->flags &= IGB_VF_FLAG_PF_SET_MAC; + vf_data->last_nack = jiffies; /* reset vlans for device */ igb_clear_vf_vfta(adapter, vf); - if (adapter->vf_data[vf].pf_vlan) - igb_ndo_set_vf_vlan(adapter->netdev, vf, - adapter->vf_data[vf].pf_vlan, - adapter->vf_data[vf].pf_qos); - else - igb_clear_vf_vfta(adapter, vf); + igb_set_vf_vlan(adapter, vf_data->pf_vlan, true, vf); + igb_set_vmvir(adapter, vf_data->pf_vlan | + (vf_data->pf_qos << VLAN_PRIO_SHIFT), vf); + igb_set_vmolr(adapter, vf, !vf_data->pf_vlan); /* reset multicast table array for vf */ adapter->vf_data[vf].num_vf_mc_hashes = 0; @@ -6191,7 +6285,7 @@ static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf) "VF %d attempted to override administratively set VLAN tag\nReload the VF driver to resume operations\n", vf); else - retval = igb_set_vf_vlan(adapter, msgbuf, vf); + retval = igb_set_vf_vlan_msg(adapter, msgbuf, vf); break; default: dev_err(&pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]); @@ -6233,6 +6327,7 @@ 
static void igb_msg_task(struct igb_adapter *adapter) /** * igb_set_uta - Set unicast filter table address * @adapter: board private structure + * @set: boolean indicating if we are setting or clearing bits * * The unicast table address is a register array of 32-bit registers. * The table is meant to be used in a way similar to how the MTA is used @@ -6240,21 +6335,18 @@ static void igb_msg_task(struct igb_adapter *adapter) * set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous * enable bit to allow vlan tag stripping when promiscuous mode is enabled **/ -static void igb_set_uta(struct igb_adapter *adapter) +static void igb_set_uta(struct igb_adapter *adapter, bool set) { struct e1000_hw *hw = &adapter->hw; + u32 uta = set ? ~0 : 0; int i; - /* The UTA table only exists on 82576 hardware and newer */ - if (hw->mac.type < e1000_82576) - return; - /* we only need to do this if VMDq is enabled */ if (!adapter->vfs_allocated_count) return; - for (i = 0; i < hw->mac.uta_reg_count; i++) - array_wr32(E1000_UTA, i, ~0); + for (i = hw->mac.uta_reg_count; i--;) + array_wr32(E1000_UTA, i, uta); } /** @@ -7201,8 +7293,6 @@ static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features) ctrl &= ~E1000_CTRL_VME; wr32(E1000_CTRL, ctrl); } - - igb_rlpml_set(adapter); } static int igb_vlan_rx_add_vid(struct net_device *netdev, @@ -7212,11 +7302,9 @@ static int igb_vlan_rx_add_vid(struct net_device *netdev, struct e1000_hw *hw = &adapter->hw; int pf_id = adapter->vfs_allocated_count; - /* attempt to add filter to vlvf array */ - igb_vlvf_set(adapter, vid, true, pf_id); - /* add the filter since PF can receive vlans w/o entry in vlvf */ - igb_vfta_set(hw, vid, true); + if (!vid || !(adapter->flags & IGB_FLAG_VLAN_PROMISC)) + igb_vfta_set(hw, vid, pf_id, true, !!vid); set_bit(vid, adapter->active_vlans); @@ -7227,16 +7315,12 @@ static int igb_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid) { struct igb_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; int pf_id = adapter->vfs_allocated_count; - s32 err; - - /* remove vlan from VLVF table array */ - err = igb_vlvf_set(adapter, vid, false, pf_id); + struct e1000_hw *hw = &adapter->hw; - /* if vid was not present in VLVF just remove it from table */ - if (err) - igb_vfta_set(hw, vid, false); + /* remove VID from filter table */ + if (vid && !(adapter->flags & IGB_FLAG_VLAN_PROMISC)) + igb_vfta_set(hw, vid, pf_id, false, true); clear_bit(vid, adapter->active_vlans); @@ -7245,11 +7329,12 @@ static int igb_vlan_rx_kill_vid(struct net_device *netdev, static void igb_restore_vlan(struct igb_adapter *adapter) { - u16 vid; + u16 vid = 1; igb_vlan_mode(adapter->netdev, adapter->netdev->features); + igb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0); - for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) + for_each_set_bit_from(vid, adapter->active_vlans, VLAN_N_VID) igb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid); } @@ -7704,15 +7789,14 @@ static void igb_io_resume(struct pci_dev *pdev) static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index, u8 qsel) { - u32 rar_low, rar_high; struct e1000_hw *hw = &adapter->hw; + u32 rar_low, rar_high; /* HW expects these in little endian so we reverse the byte order - * from network order (big endian) to little endian + * from network order (big endian) to CPU endian */ - rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) | - ((u32) addr[2] << 16) | ((u32) addr[3] << 24)); - rar_high = ((u32) addr[4] | 
((u32) addr[5] << 8)); + rar_low = le32_to_cpup((__le32 *)(addr)); + rar_high = le16_to_cpup((__le16 *)(addr + 4)); /* Indicate to hardware the Address is Valid. */ rar_high |= E1000_RAH_AV; @@ -7959,9 +8043,7 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba) * than the Rx threshold. Set hwm to PBA - max frame * size in 16B units, capping it at PBA - 6KB. */ - hwm = 64 * pba - adapter->max_frame_size / 16; - if (hwm < 64 * (pba - 6)) - hwm = 64 * (pba - 6); + hwm = 64 * (pba - 6); reg = rd32(E1000_FCRTC); reg &= ~E1000_FCRTC_RTH_COAL_MASK; reg |= ((hwm << E1000_FCRTC_RTH_COAL_SHIFT) @@ -7971,9 +8053,7 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba) /* Set the DMA Coalescing Rx threshold to PBA - 2 * max * frame size, capping it at PBA - 10KB. */ - dmac_thr = pba - adapter->max_frame_size / 512; - if (dmac_thr < pba - 10) - dmac_thr = pba - 10; + dmac_thr = pba - 10; reg = rd32(E1000_DMACR); reg &= ~E1000_DMACR_DMACTHR_MASK; reg |= ((dmac_thr << E1000_DMACR_DMACTHR_SHIFT) diff --git a/drivers/net/ethernet/intel/igbvf/mbx.c b/drivers/net/ethernet/intel/igbvf/mbx.c index 7b6cb4c3764c..01752f44ace2 100644 --- a/drivers/net/ethernet/intel/igbvf/mbx.c +++ b/drivers/net/ethernet/intel/igbvf/mbx.c @@ -234,13 +234,19 @@ static s32 e1000_check_for_rst_vf(struct e1000_hw *hw) static s32 e1000_obtain_mbx_lock_vf(struct e1000_hw *hw) { s32 ret_val = -E1000_ERR_MBX; - - /* Take ownership of the buffer */ - ew32(V2PMAILBOX(0), E1000_V2PMAILBOX_VFU); - - /* reserve mailbox for VF use */ - if (e1000_read_v2p_mailbox(hw) & E1000_V2PMAILBOX_VFU) - ret_val = E1000_SUCCESS; + int count = 10; + + do { + /* Take ownership of the buffer */ + ew32(V2PMAILBOX(0), E1000_V2PMAILBOX_VFU); + + /* reserve mailbox for VF use */ + if (e1000_read_v2p_mailbox(hw) & E1000_V2PMAILBOX_VFU) { + ret_val = 0; + break; + } + udelay(1000); + } while (count-- > 0); return ret_val; } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index 4b9156cd8b93..84fa28ceb200 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -796,6 +796,10 @@ struct ixgbe_adapter { u8 default_up; unsigned long fwd_bitmask; /* Bitmask indicating in use pools */ +#define IXGBE_MAX_LINK_HANDLE 10 + struct ixgbe_mat_field *jump_tables[IXGBE_MAX_LINK_HANDLE]; + unsigned long tables; + /* maximum number of RETA entries among all devices supported by ixgbe * driver: currently it's x550 device in non-SRIOV mode */ @@ -925,6 +929,9 @@ s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw, u16 soft_id); void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input, union ixgbe_atr_input *mask); +int ixgbe_update_ethtool_fdir_entry(struct ixgbe_adapter *adapter, + struct ixgbe_fdir_filter *input, + u16 sw_idx); void ixgbe_set_rx_mode(struct net_device *netdev); #ifdef CONFIG_IXGBE_DCB void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c index a507a6fe3624..02c7333a9c83 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c @@ -139,6 +139,11 @@ s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *hw, /* Calculate credit refill ratio using multiplier */ credit_refill = min(link_percentage * min_multiplier, MAX_CREDIT_REFILL); + + /* Refill at least minimum credit */ + if (credit_refill < min_credit) + credit_refill = min_credit; + p->data_credits_refill = 
(u16)credit_refill; /* Calculate maximum credit for the TC */ @@ -149,7 +154,7 @@ s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *hw, * of a TC is too small, the maximum credit may not be * enough to send out a jumbo frame in data plane arbitration. */ - if (credit_max && (credit_max < min_credit)) + if (credit_max < min_credit) credit_max = min_credit; if (direction == DCB_TX_CONFIG) { diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c index 23277ab153b6..b5cc989a3d23 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c @@ -223,13 +223,13 @@ s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc) reg |= IXGBE_MFLCN_DPF; /* - * X540 supports per TC Rx priority flow control. So - * clear all TCs and only enable those that should be + * X540 & X550 supports per TC Rx priority flow control. + * So clear all TCs and only enable those that should be * enabled. */ reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE); - if (hw->mac.type == ixgbe_mac_X540) + if (hw->mac.type >= ixgbe_mac_X540) reg |= pfc_en << IXGBE_MFLCN_RPFCE_SHIFT; if (pfc_en) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index 2448eba2eecd..726e0eeee63b 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -185,9 +185,7 @@ static int ixgbe_get_settings(struct net_device *netdev, struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; ixgbe_link_speed supported_link; - u32 link_speed = 0; bool autoneg = false; - bool link_up; hw->mac.ops.get_link_capabilities(hw, &supported_link, &autoneg); @@ -313,9 +311,8 @@ static int ixgbe_get_settings(struct net_device *netdev, break; } - hw->mac.ops.check_link(hw, &link_speed, &link_up, false); - if (link_up) { - switch (link_speed) { + if (netif_carrier_ok(netdev)) { + switch (adapter->link_speed) { case IXGBE_LINK_SPEED_10GB_FULL: ethtool_cmd_speed_set(ecmd, SPEED_10000); break; @@ -2523,9 +2520,9 @@ static int ixgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, return ret; } -static int ixgbe_update_ethtool_fdir_entry(struct ixgbe_adapter *adapter, - struct ixgbe_fdir_filter *input, - u16 sw_idx) +int ixgbe_update_ethtool_fdir_entry(struct ixgbe_adapter *adapter, + struct ixgbe_fdir_filter *input, + u16 sw_idx) { struct ixgbe_hw *hw = &adapter->hw; struct hlist_node *node2; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c index 5f988703e1b7..2a653ec954f5 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c @@ -77,7 +77,7 @@ int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid) if (!netdev) return 0; - if (xid >= IXGBE_FCOE_DDP_MAX) + if (xid >= netdev->fcoe_ddp_xid) return 0; adapter = netdev_priv(netdev); @@ -177,7 +177,7 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid, return 0; adapter = netdev_priv(netdev); - if (xid >= IXGBE_FCOE_DDP_MAX) { + if (xid >= netdev->fcoe_ddp_xid) { e_warn(drv, "xid=0x%x out-of-range\n", xid); return 0; } @@ -517,6 +517,7 @@ int ixgbe_fso(struct ixgbe_ring *tx_ring, u32 vlan_macip_lens; u32 fcoe_sof_eof = 0; u32 mss_l4len_idx; + u32 type_tucmd = IXGBE_ADVTXT_TUCMD_FCOE; u8 sof, eof; if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_type != SKB_GSO_FCOE)) { @@ -593,6 +594,8 @@ int 
ixgbe_fso(struct ixgbe_ring *tx_ring, skb_shinfo(skb)->gso_size); first->bytecount += (first->gso_segs - 1) * *hdr_len; first->tx_flags |= IXGBE_TX_FLAGS_TSO; + /* Hardware expects L4T to be RSV for FCoE TSO */ + type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_RSV; } /* set flag indicating FCOE to ixgbe_tx_map call */ @@ -610,7 +613,7 @@ int ixgbe_fso(struct ixgbe_ring *tx_ring, /* write context desc */ ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, fcoe_sof_eof, - IXGBE_ADVTXT_TUCMD_FCOE, mss_l4len_idx); + type_tucmd, mss_l4len_idx); return 0; } @@ -996,8 +999,7 @@ int ixgbe_fcoe_get_hbainfo(struct net_device *netdev, return -EINVAL; /* Don't return information on unsupported devices */ - if (hw->mac.type != ixgbe_mac_82599EB && - hw->mac.type != ixgbe_mac_X540) + if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) return -EINVAL; /* Manufacturer */ @@ -1043,6 +1045,10 @@ int ixgbe_fcoe_get_hbainfo(struct net_device *netdev, snprintf(info->model, sizeof(info->model), "Intel 82599"); + } else if (hw->mac.type == ixgbe_mac_X550) { + snprintf(info->model, + sizeof(info->model), + "Intel X550"); } else { snprintf(info->model, sizeof(info->model), diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index ea9537d0e63a..cf4b729c92d7 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -51,6 +51,8 @@ #include <linux/prefetch.h> #include <scsi/fc/fc_fcoe.h> #include <net/vxlan.h> +#include <net/pkt_cls.h> +#include <net/tc_act/tc_gact.h> #ifdef CONFIG_OF #include <linux/of_net.h> @@ -65,6 +67,7 @@ #include "ixgbe_common.h" #include "ixgbe_dcb_82599.h" #include "ixgbe_sriov.h" +#include "ixgbe_model.h" char ixgbe_driver_name[] = "ixgbe"; static const char ixgbe_driver_string[] = @@ -1089,7 +1092,7 @@ static void ixgbe_tx_timeout_reset(struct ixgbe_adapter *adapter) * @tx_ring: tx ring to clean **/ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, - struct ixgbe_ring *tx_ring) + struct ixgbe_ring *tx_ring, int napi_budget) { struct ixgbe_adapter *adapter = q_vector->adapter; struct ixgbe_tx_buffer *tx_buffer; @@ -1127,7 +1130,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, total_packets += tx_buffer->gso_segs; /* free the skb */ - dev_consume_skb_any(tx_buffer->skb); + napi_consume_skb(tx_buffer->skb, napi_budget); /* unmap skb header data */ dma_unmap_single(tx_ring->dev, @@ -1483,7 +1486,7 @@ static inline void ixgbe_rx_checksum(struct ixgbe_ring *ring, return; if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_OUTERIPER)) { - ring->rx_stats.csum_err++; + skb->ip_summed = CHECKSUM_NONE; return; } /* If we checked the outer header let the stack know */ @@ -2784,7 +2787,7 @@ int ixgbe_poll(struct napi_struct *napi, int budget) #endif ixgbe_for_each_ring(ring, q_vector->tx) - clean_complete &= !!ixgbe_clean_tx_irq(q_vector, ring); + clean_complete &= !!ixgbe_clean_tx_irq(q_vector, ring, budget); /* Exit if we are called by netpoll or busy polling is active */ if ((budget <= 0) || !ixgbe_qv_lock_napi(q_vector)) @@ -3619,6 +3622,9 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter, IXGBE_WRITE_REG(hw, IXGBE_RDBAH(reg_idx), (rdba >> 32)); IXGBE_WRITE_REG(hw, IXGBE_RDLEN(reg_idx), ring->count * sizeof(union ixgbe_adv_rx_desc)); + /* Force flushing of IXGBE_RDLEN to prevent MDD */ + IXGBE_WRITE_FLUSH(hw); + IXGBE_WRITE_REG(hw, IXGBE_RDH(reg_idx), 0); IXGBE_WRITE_REG(hw, IXGBE_RDT(reg_idx), 0); ring->tail = adapter->io_addr + IXGBE_RDT(reg_idx); @@ -5542,6 +5548,9 
@@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter) #endif /* CONFIG_IXGBE_DCB */ #endif /* IXGBE_FCOE */ + /* initialize static ixgbe jump table entries */ + adapter->jump_tables[0] = ixgbe_ipv4_fields; + adapter->mac_table = kzalloc(sizeof(struct ixgbe_mac_addr) * hw->mac.num_rar_entries, GFP_ATOMIC); @@ -7570,7 +7579,9 @@ static void ixgbe_atr(struct ixgbe_ring *ring, /* snag network header to get L4 type and address */ skb = first->skb; hdr.network = skb_network_header(skb); - if (skb->encapsulation) { + if (!skb->encapsulation) { + th = tcp_hdr(skb); + } else { #ifdef CONFIG_IXGBE_VXLAN struct ixgbe_adapter *adapter = q_vector->adapter; @@ -7589,14 +7600,34 @@ static void ixgbe_atr(struct ixgbe_ring *ring, #else return; #endif /* CONFIG_IXGBE_VXLAN */ - } else { - /* Currently only IPv4/IPv6 with TCP is supported */ - if ((first->protocol != htons(ETH_P_IPV6) || - hdr.ipv6->nexthdr != IPPROTO_TCP) && - (first->protocol != htons(ETH_P_IP) || - hdr.ipv4->protocol != IPPROTO_TCP)) + } + + /* Currently only IPv4/IPv6 with TCP is supported */ + switch (hdr.ipv4->version) { + case IPVERSION: + if (hdr.ipv4->protocol != IPPROTO_TCP) return; - th = tcp_hdr(skb); + break; + case 6: + if (likely((unsigned char *)th - hdr.network == + sizeof(struct ipv6hdr))) { + if (hdr.ipv6->nexthdr != IPPROTO_TCP) + return; + } else { + __be16 frag_off; + u8 l4_hdr; + + ipv6_skip_exthdr(skb, hdr.network - skb->data + + sizeof(struct ipv6hdr), + &l4_hdr, &frag_off); + if (unlikely(frag_off)) + return; + if (l4_hdr != IPPROTO_TCP) + return; + } + break; + default: + return; } /* skip this packet since it is invalid or the socket is closing */ @@ -7631,10 +7662,12 @@ static void ixgbe_atr(struct ixgbe_ring *ring, common.port.src ^= th->dest ^ first->protocol; common.port.dst ^= th->source; - if (first->protocol == htons(ETH_P_IP)) { + switch (hdr.ipv4->version) { + case IPVERSION: input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4; common.ip ^= hdr.ipv4->saddr ^ hdr.ipv4->daddr; - } else { + break; + case 6: input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV6; common.ip ^= hdr.ipv6->saddr.s6_addr32[0] ^ hdr.ipv6->saddr.s6_addr32[1] ^ @@ -7644,6 +7677,9 @@ static void ixgbe_atr(struct ixgbe_ring *ring, hdr.ipv6->daddr.s6_addr32[1] ^ hdr.ipv6->daddr.s6_addr32[2] ^ hdr.ipv6->daddr.s6_addr32[3]; + break; + default: + break; } #ifdef CONFIG_IXGBE_VXLAN @@ -8170,6 +8206,228 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc) return 0; } +static int ixgbe_delete_clsu32(struct ixgbe_adapter *adapter, + struct tc_cls_u32_offload *cls) +{ + int err; + + spin_lock(&adapter->fdir_perfect_lock); + err = ixgbe_update_ethtool_fdir_entry(adapter, NULL, cls->knode.handle); + spin_unlock(&adapter->fdir_perfect_lock); + return err; +} + +static int ixgbe_configure_clsu32_add_hnode(struct ixgbe_adapter *adapter, + __be16 protocol, + struct tc_cls_u32_offload *cls) +{ + /* These ixgbe devices do not support hash tables at the moment, + * so abort when given hash tables. 
+ */ + if (cls->hnode.divisor > 0) + return -EINVAL; + + set_bit(TC_U32_USERHTID(cls->hnode.handle), &adapter->tables); + return 0; +} + +static int ixgbe_configure_clsu32_del_hnode(struct ixgbe_adapter *adapter, + struct tc_cls_u32_offload *cls) +{ + clear_bit(TC_U32_USERHTID(cls->hnode.handle), &adapter->tables); + return 0; +} + +static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter, + __be16 protocol, + struct tc_cls_u32_offload *cls) +{ + u32 loc = cls->knode.handle & 0xfffff; + struct ixgbe_hw *hw = &adapter->hw; + struct ixgbe_mat_field *field_ptr; + struct ixgbe_fdir_filter *input; + union ixgbe_atr_input mask; +#ifdef CONFIG_NET_CLS_ACT + const struct tc_action *a; +#endif + int i, err = 0; + u8 queue; + u32 handle; + + memset(&mask, 0, sizeof(union ixgbe_atr_input)); + handle = cls->knode.handle; + + /* At the moment cls_u32 jumps to transport layer and skips past + * L2 headers. The canonical method to match L2 frames is to use + * negative values. However, this is error prone at best but really + * just broken because there is no way to "know" what sort of hdr + * is in front of the transport layer. Fix cls_u32 to support L2 + * headers when needed. + */ + if (protocol != htons(ETH_P_IP)) + return -EINVAL; + + if (cls->knode.link_handle || + cls->knode.link_handle >= IXGBE_MAX_LINK_HANDLE) { + struct ixgbe_nexthdr *nexthdr = ixgbe_ipv4_jumps; + u32 uhtid = TC_U32_USERHTID(cls->knode.link_handle); + + if (!test_bit(uhtid, &adapter->tables)) + return -EINVAL; + + for (i = 0; nexthdr[i].jump; i++) { + if (nexthdr->o != cls->knode.sel->offoff || + nexthdr->s != cls->knode.sel->offshift || + nexthdr->m != cls->knode.sel->offmask || + /* do not support multiple key jumps, it's just mad */ + cls->knode.sel->nkeys > 1) + return -EINVAL; + + if (nexthdr->off != cls->knode.sel->keys[0].off || + nexthdr->val != cls->knode.sel->keys[0].val || + nexthdr->mask != cls->knode.sel->keys[0].mask) + return -EINVAL; + + if (uhtid >= IXGBE_MAX_LINK_HANDLE) + return -EINVAL; + + adapter->jump_tables[uhtid] = nexthdr->jump; + } + return 0; + } + + if (loc >= ((1024 << adapter->fdir_pballoc) - 2)) { + e_err(drv, "Location out of range\n"); + return -EINVAL; + } + + /* cls u32 is a graph starting at root node 0x800. The driver tracks + * links and also the fields used to advance the parser across each + * link (e.g. nexthdr/eat parameters from 'tc'). This way we can map + * the u32 graph onto the hardware parse graph denoted in ixgbe_model.h. + * To add support for new nodes, update the ixgbe_model.h parse structures; + * this function _should_ be generic, so try not to hardcode values here. 
+ */ + if (TC_U32_USERHTID(handle) == 0x800) { + field_ptr = adapter->jump_tables[0]; + } else { + if (TC_U32_USERHTID(handle) >= ARRAY_SIZE(adapter->jump_tables)) + return -EINVAL; + + field_ptr = adapter->jump_tables[TC_U32_USERHTID(handle)]; + } + + if (!field_ptr) + return -EINVAL; + + input = kzalloc(sizeof(*input), GFP_KERNEL); + if (!input) + return -ENOMEM; + + for (i = 0; i < cls->knode.sel->nkeys; i++) { + int off = cls->knode.sel->keys[i].off; + __be32 val = cls->knode.sel->keys[i].val; + __be32 m = cls->knode.sel->keys[i].mask; + bool found_entry = false; + int j; + + for (j = 0; field_ptr[j].val; j++) { + if (field_ptr[j].off == off && + field_ptr[j].mask == m) { + field_ptr[j].val(input, &mask, val, m); + input->filter.formatted.flow_type |= + field_ptr[j].type; + found_entry = true; + break; + } + } + + if (!found_entry) + goto err_out; + } + + mask.formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK | + IXGBE_ATR_L4TYPE_MASK; + + if (input->filter.formatted.flow_type == IXGBE_ATR_FLOW_TYPE_IPV4) + mask.formatted.flow_type &= IXGBE_ATR_L4TYPE_IPV6_MASK; + +#ifdef CONFIG_NET_CLS_ACT + if (list_empty(&cls->knode.exts->actions)) + goto err_out; + + list_for_each_entry(a, &cls->knode.exts->actions, list) { + if (!is_tcf_gact_shot(a)) + goto err_out; + } +#endif + + input->action = IXGBE_FDIR_DROP_QUEUE; + queue = IXGBE_FDIR_DROP_QUEUE; + input->sw_idx = loc; + + spin_lock(&adapter->fdir_perfect_lock); + + if (hlist_empty(&adapter->fdir_filter_list)) { + memcpy(&adapter->fdir_mask, &mask, sizeof(mask)); + err = ixgbe_fdir_set_input_mask_82599(hw, &mask); + if (err) + goto err_out_w_lock; + } else if (memcmp(&adapter->fdir_mask, &mask, sizeof(mask))) { + err = -EINVAL; + goto err_out_w_lock; + } + + ixgbe_atr_compute_perfect_hash_82599(&input->filter, &mask); + err = ixgbe_fdir_write_perfect_filter_82599(hw, &input->filter, + input->sw_idx, queue); + if (!err) + ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx); + spin_unlock(&adapter->fdir_perfect_lock); + + return err; +err_out_w_lock: + spin_unlock(&adapter->fdir_perfect_lock); +err_out: + kfree(input); + return -EINVAL; +} + +int __ixgbe_setup_tc(struct net_device *dev, u32 handle, __be16 proto, + struct tc_to_netdev *tc) +{ + struct ixgbe_adapter *adapter = netdev_priv(dev); + + if (TC_H_MAJ(handle) == TC_H_MAJ(TC_H_INGRESS) && + tc->type == TC_SETUP_CLSU32) { + if (!(dev->features & NETIF_F_HW_TC)) + return -EINVAL; + + switch (tc->cls_u32->command) { + case TC_CLSU32_NEW_KNODE: + case TC_CLSU32_REPLACE_KNODE: + return ixgbe_configure_clsu32(adapter, + proto, tc->cls_u32); + case TC_CLSU32_DELETE_KNODE: + return ixgbe_delete_clsu32(adapter, tc->cls_u32); + case TC_CLSU32_NEW_HNODE: + case TC_CLSU32_REPLACE_HNODE: + return ixgbe_configure_clsu32_add_hnode(adapter, proto, + tc->cls_u32); + case TC_CLSU32_DELETE_HNODE: + return ixgbe_configure_clsu32_del_hnode(adapter, + tc->cls_u32); + default: + return -EINVAL; + } + } + + if (handle != TC_H_ROOT || tc->type != TC_SETUP_MQPRIO) + return -EINVAL; + + return ixgbe_setup_tc(dev, tc->tc); +} + #ifdef CONFIG_PCI_IOV void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter) { @@ -8232,19 +8490,17 @@ static int ixgbe_set_features(struct net_device *netdev, } /* - * Check if Flow Director n-tuple support was enabled or disabled. If - * the state changed, we need to reset. + * Check if Flow Director n-tuple support or hw_tc support was + * enabled or disabled. If the state changed, we need to reset. 
*/ - switch (features & NETIF_F_NTUPLE) { - case NETIF_F_NTUPLE: + if ((features & NETIF_F_NTUPLE) || (features & NETIF_F_HW_TC)) { /* turn off ATR, enable perfect filters and reset */ if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) need_reset = true; adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE; - break; - default: + } else { /* turn off perfect filters, enable ATR and reset */ if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) need_reset = true; @@ -8252,23 +8508,16 @@ static int ixgbe_set_features(struct net_device *netdev, adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE; /* We cannot enable ATR if SR-IOV is enabled */ - if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) - break; - - /* We cannot enable ATR if we have 2 or more traffic classes */ - if (netdev_get_num_tc(netdev) > 1) - break; - - /* We cannot enable ATR if RSS is disabled */ - if (adapter->ring_feature[RING_F_RSS].limit <= 1) - break; - - /* A sample rate of 0 indicates ATR disabled */ - if (!adapter->atr_sample_rate) - break; - - adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; - break; + if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED || + /* We cannot enable ATR if we have 2 or more tcs */ + (netdev_get_num_tc(netdev) > 1) || + /* We cannot enable ATR if RSS is disabled */ + (adapter->ring_feature[RING_F_RSS].limit <= 1) || + /* A sample rate of 0 indicates ATR disabled */ + (!adapter->atr_sample_rate)) + ; /* do nothing, not supported */ + else /* otherwise supported and set the flag */ + adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; } if (features & NETIF_F_HW_VLAN_CTAG_RX) @@ -8627,9 +8876,7 @@ static const struct net_device_ops ixgbe_netdev_ops = { .ndo_set_vf_trust = ixgbe_ndo_set_vf_trust, .ndo_get_vf_config = ixgbe_ndo_get_vf_config, .ndo_get_stats64 = ixgbe_get_stats64, -#ifdef CONFIG_IXGBE_DCB - .ndo_setup_tc = ixgbe_setup_tc, -#endif + .ndo_setup_tc = __ixgbe_setup_tc, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = ixgbe_netpoll, #endif @@ -9000,7 +9247,8 @@ skip_sriov: case ixgbe_mac_X550EM_x: netdev->features |= NETIF_F_SCTP_CRC; netdev->hw_features |= NETIF_F_SCTP_CRC | - NETIF_F_NTUPLE; + NETIF_F_NTUPLE | + NETIF_F_HW_TC; break; default: break; } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h new file mode 100644 index 000000000000..ce48872d4782 --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h @@ -0,0 +1,112 @@ +/******************************************************************************* + * + * Intel 10 Gigabit PCI Express Linux driver + * Copyright(c) 2016 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program. If not, see <http://www.gnu.org/licenses/>. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#ifndef _IXGBE_MODEL_H_ +#define _IXGBE_MODEL_H_ + +#include "ixgbe.h" +#include "ixgbe_type.h" + +struct ixgbe_mat_field { + unsigned int off; + unsigned int mask; + int (*val)(struct ixgbe_fdir_filter *input, + union ixgbe_atr_input *mask, + u32 val, u32 m); + unsigned int type; +}; + +static inline int ixgbe_mat_prgm_sip(struct ixgbe_fdir_filter *input, + union ixgbe_atr_input *mask, + u32 val, u32 m) +{ + input->filter.formatted.src_ip[0] = val; + mask->formatted.src_ip[0] = m; + return 0; +} + +static inline int ixgbe_mat_prgm_dip(struct ixgbe_fdir_filter *input, + union ixgbe_atr_input *mask, + u32 val, u32 m) +{ + input->filter.formatted.dst_ip[0] = val; + mask->formatted.dst_ip[0] = m; + return 0; +} + +static struct ixgbe_mat_field ixgbe_ipv4_fields[] = { + { .off = 12, .mask = -1, .val = ixgbe_mat_prgm_sip, + .type = IXGBE_ATR_FLOW_TYPE_IPV4}, + { .off = 16, .mask = -1, .val = ixgbe_mat_prgm_dip, + .type = IXGBE_ATR_FLOW_TYPE_IPV4}, + { .val = NULL } /* terminal node */ +}; + +static inline int ixgbe_mat_prgm_sport(struct ixgbe_fdir_filter *input, + union ixgbe_atr_input *mask, + u32 val, u32 m) +{ + input->filter.formatted.src_port = val & 0xffff; + mask->formatted.src_port = m & 0xffff; + return 0; +} + +static inline int ixgbe_mat_prgm_dport(struct ixgbe_fdir_filter *input, + union ixgbe_atr_input *mask, + u32 val, u32 m) +{ + input->filter.formatted.dst_port = val & 0xffff; + mask->formatted.dst_port = m & 0xffff; + return 0; +} + +static struct ixgbe_mat_field ixgbe_tcp_fields[] = { + {.off = 0, .mask = 0xffff, .val = ixgbe_mat_prgm_sport, + .type = IXGBE_ATR_FLOW_TYPE_TCPV4}, + {.off = 2, .mask = 0xffff, .val = ixgbe_mat_prgm_dport, + .type = IXGBE_ATR_FLOW_TYPE_TCPV4}, + { .val = NULL } /* terminal node */ +}; + +struct ixgbe_nexthdr { + /* offset, shift, and mask of position to next header */ + unsigned int o; + u32 s; + u32 m; + /* match criteria to make this jump */ + unsigned int off; + u32 val; + u32 mask; + /* location of jump to make */ + struct ixgbe_mat_field *jump; +}; + +static struct ixgbe_nexthdr ixgbe_ipv4_jumps[] = { + { .o = 0, .s = 6, .m = 0xf, + .off = 8, .val = 0x600, .mask = 0xff00, .jump = ixgbe_tcp_fields}, + { .jump = NULL } /* terminal node */ +}; +#endif /* _IXGBE_MODEL_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h index 5f53cc6c609a..bf7367a08716 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h @@ -2780,6 +2780,7 @@ struct ixgbe_adv_tx_context_desc { #define IXGBE_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */ #define IXGBE_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */ #define IXGBE_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 Packet TYPE of SCTP */ +#define IXGBE_ADVTXD_TUCMD_L4T_RSV 0x00001800 /* RSV L4 Packet TYPE */ #define IXGBE_ADVTXD_TUCMD_MKRREQ 0x00002000 /*Req requires Markers and CRC*/ #define IXGBE_ADVTXD_POPTS_IPSEC 0x00000400 /* IPSec offload request */ #define IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP 0x00002000 /* IPSec Type ESP */ diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c index 581928c068f2..b630ef1e9646 100644 --- a/drivers/net/ethernet/lantiq_etop.c +++ b/drivers/net/ethernet/lantiq_etop.c @@ -375,22 +375,16 @@ static int ltq_etop_mdio_probe(struct net_device *dev) { struct ltq_etop_priv *priv = netdev_priv(dev); - 
struct phy_device *phydev = NULL; - int phy_addr; + struct phy_device *phydev; - for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) { - if (priv->mii_bus->phy_map[phy_addr]) { - phydev = priv->mii_bus->phy_map[phy_addr]; - break; - } - } + phydev = phy_find_first(priv->mii_bus); if (!phydev) { netdev_err(dev, "no PHY found\n"); return -ENODEV; } - phydev = phy_connect(dev, dev_name(&phydev->dev), + phydev = phy_connect(dev, phydev_name(phydev), &ltq_etop_mdio_link, priv->pldata->mii_mode); if (IS_ERR(phydev)) { @@ -408,9 +402,7 @@ ltq_etop_mdio_probe(struct net_device *dev) phydev->advertising = phydev->supported; priv->phydev = phydev; - pr_info("%s: attached PHY [%s] (phy_addr=%s, irq=%d)\n", - dev->name, phydev->drv->name, - dev_name(&phydev->dev), phydev->irq); + phy_attached_info(phydev); return 0; } @@ -435,18 +427,9 @@ ltq_etop_mdio_init(struct net_device *dev) priv->mii_bus->name = "ltq_mii"; snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", priv->pdev->name, priv->pdev->id); - priv->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); - if (!priv->mii_bus->irq) { - err = -ENOMEM; - goto err_out_free_mdiobus; - } - - for (i = 0; i < PHY_MAX_ADDR; ++i) - priv->mii_bus->irq[i] = PHY_POLL; - if (mdiobus_register(priv->mii_bus)) { err = -ENXIO; - goto err_out_free_mdio_irq; + goto err_out_free_mdiobus; } if (ltq_etop_mdio_probe(dev)) { @@ -457,8 +440,6 @@ ltq_etop_mdio_init(struct net_device *dev) err_out_unregister_bus: mdiobus_unregister(priv->mii_bus); -err_out_free_mdio_irq: - kfree(priv->mii_bus->irq); err_out_free_mdiobus: mdiobus_free(priv->mii_bus); err_out: @@ -472,7 +453,6 @@ ltq_etop_mdio_cleanup(struct net_device *dev) phy_disconnect(priv->phydev); mdiobus_unregister(priv->mii_bus); - kfree(priv->mii_bus->irq); mdiobus_free(priv->mii_bus); } diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index 4eba2ed53052..55831188bc32 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c @@ -762,10 +762,10 @@ txq_put_data_tso(struct net_device *dev, struct tx_queue *txq, if (length <= 8 && (uintptr_t)data & 0x7) { /* Copy unaligned small data fragment to TSO header data area */ - memcpy(txq->tso_hdrs + txq->tx_curr_desc * TSO_HEADER_SIZE, + memcpy(txq->tso_hdrs + tx_index * TSO_HEADER_SIZE, data, length); desc->buf_ptr = txq->tso_hdrs_dma - + txq->tx_curr_desc * TSO_HEADER_SIZE; + + tx_index * TSO_HEADER_SIZE; } else { /* Alignment is okay, map buffer and hand off to hardware */ txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_SINGLE; @@ -3133,7 +3133,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev) if (!mp->phy) err = -ENODEV; else - phy_addr_set(mp, mp->phy->addr); + phy_addr_set(mp, mp->phy->mdio.addr); } else if (pd->phy_addr != MV643XX_ETH_PHY_NONE) { mp->phy = phy_scan(mp, pd->phy_addr); diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c index fc2fb25343f4..8982c882af1b 100644 --- a/drivers/net/ethernet/marvell/mvmdio.c +++ b/drivers/net/ethernet/marvell/mvmdio.c @@ -187,7 +187,7 @@ static int orion_mdio_probe(struct platform_device *pdev) struct resource *r; struct mii_bus *bus; struct orion_mdio_dev *dev; - int i, ret; + int ret; r = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!r) { @@ -207,14 +207,6 @@ static int orion_mdio_probe(struct platform_device *pdev) dev_name(&pdev->dev)); bus->parent = &pdev->dev; - bus->irq = devm_kmalloc_array(&pdev->dev, PHY_MAX_ADDR, sizeof(int), - GFP_KERNEL); - if 
(!bus->irq) - return -ENOMEM; - - for (i = 0; i < PHY_MAX_ADDR; i++) - bus->irq[i] = PHY_POLL; - dev = bus->priv; dev->regs = devm_ioremap(&pdev->dev, r->start, resource_size(r)); if (!dev->regs) { diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 15b1f6bbd92d..662c2ee268c7 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -11,28 +11,28 @@ * warranty of any kind, whether express or implied. */ -#include <linux/kernel.h> -#include <linux/netdevice.h> +#include <linux/clk.h> +#include <linux/cpu.h> #include <linux/etherdevice.h> -#include <linux/platform_device.h> -#include <linux/skbuff.h> +#include <linux/if_vlan.h> #include <linux/inetdevice.h> -#include <linux/mbus.h> -#include <linux/module.h> #include <linux/interrupt.h> -#include <linux/if_vlan.h> -#include <net/ip.h> -#include <net/ipv6.h> #include <linux/io.h> -#include <net/tso.h> +#include <linux/kernel.h> +#include <linux/mbus.h> +#include <linux/module.h> +#include <linux/netdevice.h> #include <linux/of.h> +#include <linux/of_address.h> #include <linux/of_irq.h> #include <linux/of_mdio.h> #include <linux/of_net.h> -#include <linux/of_address.h> #include <linux/phy.h> -#include <linux/clk.h> -#include <linux/cpu.h> +#include <linux/platform_device.h> +#include <linux/skbuff.h> +#include <net/ip.h> +#include <net/ipv6.h> +#include <net/tso.h> /* Registers */ #define MVNETA_RXQ_CONFIG_REG(q) (0x1400 + ((q) << 2)) @@ -373,6 +373,8 @@ struct mvneta_port { /* Core clock */ struct clk *clk; + /* AXI clock */ + struct clk *clk_bus; u8 mcast_count[256]; u16 tx_ring_size; u16 rx_ring_size; @@ -3242,26 +3244,25 @@ static void mvneta_ethtool_update_stats(struct mvneta_port *pp) const struct mvneta_statistic *s; void __iomem *base = pp->base; u32 high, low, val; + u64 val64; int i; for (i = 0, s = mvneta_statistics; s < mvneta_statistics + ARRAY_SIZE(mvneta_statistics); s++, i++) { - val = 0; - switch (s->type) { case T_REG_32: val = readl_relaxed(base + s->offset); + pp->ethtool_stats[i] += val; break; case T_REG_64: /* Docs say to read low 32-bit then high */ low = readl_relaxed(base + s->offset); high = readl_relaxed(base + s->offset + 4); - val = (u64)high << 32 | low; + val64 = (u64)high << 32 | low; + pp->ethtool_stats[i] += val64; break; } - - pp->ethtool_stats[i] += val; } } @@ -3605,7 +3606,9 @@ static int mvneta_probe(struct platform_device *pdev) pp->indir[0] = rxq_def; - pp->clk = devm_clk_get(&pdev->dev, NULL); + pp->clk = devm_clk_get(&pdev->dev, "core"); + if (IS_ERR(pp->clk)) + pp->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(pp->clk)) { err = PTR_ERR(pp->clk); goto err_put_phy_node; @@ -3613,6 +3616,10 @@ static int mvneta_probe(struct platform_device *pdev) clk_prepare_enable(pp->clk); + pp->clk_bus = devm_clk_get(&pdev->dev, "bus"); + if (!IS_ERR(pp->clk_bus)) + clk_prepare_enable(pp->clk_bus); + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); pp->base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(pp->base)) { @@ -3714,7 +3721,7 @@ static int mvneta_probe(struct platform_device *pdev) mvneta_fixed_link_update(pp, phy); - put_device(&phy->dev); + put_device(&phy->mdio.dev); } return 0; @@ -3724,6 +3731,7 @@ err_free_stats: err_free_ports: free_percpu(pp->ports); err_clk: + clk_disable_unprepare(pp->clk_bus); clk_disable_unprepare(pp->clk); err_put_phy_node: of_node_put(phy_node); @@ -3741,6 +3749,7 @@ static int mvneta_remove(struct platform_device *pdev) struct mvneta_port *pp = netdev_priv(dev); 
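
Aside on the mvneta statistics hunk above: the T_REG_64 path previously accumulated through a 32-bit temporary, which truncated the high word of 64-bit hardware counters; the new val64 variable keeps the full value. A standalone sketch of the fixed composition, low word first as the register pair is read above (illustrative only, not part of the commit):

    #include <stdint.h>
    #include <stdio.h>

    /* Compose a 64-bit counter from two 32-bit register halves; the
     * driver reads the low word first, then the high word. */
    static uint64_t read_stat64(uint32_t low, uint32_t high)
    {
            return ((uint64_t)high << 32) | low;
    }

    int main(void)
    {
            /* high = 1, low = 2 -> 0x100000002; a u32 temporary would keep only 2 */
            printf("0x%llx\n", (unsigned long long)read_stat64(2, 1));
            return 0;
    }
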
unregister_netdev(dev); + clk_disable_unprepare(pp->clk_bus); clk_disable_unprepare(pp->clk); free_percpu(pp->ports); free_percpu(pp->stats); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 0c7e3f69a73b..01d6a9695586 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -69,6 +69,15 @@ int mlx4_en_setup_tc(struct net_device *dev, u8 up) return 0; } +static int __mlx4_en_setup_tc(struct net_device *dev, u32 handle, __be16 proto, + struct tc_to_netdev *tc) +{ + if (handle != TC_H_ROOT || tc->type != TC_SETUP_MQPRIO) + return -EINVAL; + + return mlx4_en_setup_tc(dev, tc->tc); +} + #ifdef CONFIG_RFS_ACCEL struct mlx4_en_filter { @@ -2466,7 +2475,7 @@ static const struct net_device_ops mlx4_netdev_ops = { #endif .ndo_set_features = mlx4_en_set_features, .ndo_fix_features = mlx4_en_fix_features, - .ndo_setup_tc = mlx4_en_setup_tc, + .ndo_setup_tc = __mlx4_en_setup_tc, #ifdef CONFIG_RFS_ACCEL .ndo_rx_flow_steer = mlx4_en_filter_rfs, #endif @@ -2504,7 +2513,7 @@ static const struct net_device_ops mlx4_netdev_ops_master = { #endif .ndo_set_features = mlx4_en_set_features, .ndo_fix_features = mlx4_en_fix_features, - .ndo_setup_tc = mlx4_en_setup_tc, + .ndo_setup_tc = __mlx4_en_setup_tc, #ifdef CONFIG_RFS_ACCEL .ndo_rx_flow_steer = mlx4_en_filter_rfs, #endif diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c index 2c2baab9d880..d66c690a8597 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.c +++ b/drivers/net/ethernet/mellanox/mlx4/fw.c @@ -157,6 +157,7 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags) [29] = "802.1ad offload support", [31] = "Modifying loopback source checks using UPDATE_QP support", [32] = "Loopback source checks support", + [33] = "RoCEv2 support" }; int i; @@ -626,6 +627,8 @@ out: return err; } +static void disable_unsupported_roce_caps(void *buf); + int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) { struct mlx4_cmd_mailbox *mailbox; @@ -738,6 +741,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) if (err) goto out; + if (mlx4_is_mfunc(dev)) + disable_unsupported_roce_caps(outbox); MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_QP_OFFSET); dev_cap->reserved_qps = 1 << (field & 0xf); MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_OFFSET); @@ -905,6 +910,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_EQE_STRIDE; MLX4_GET(dev_cap->bmme_flags, outbox, QUERY_DEV_CAP_BMME_FLAGS_OFFSET); + if (dev_cap->bmme_flags & MLX4_FLAG_ROCE_V1_V2) + dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ROCE_V1_V2; if (dev_cap->bmme_flags & MLX4_FLAG_PORT_REMAP) dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_PORT_REMAP; MLX4_GET(field, outbox, QUERY_DEV_CAP_CONFIG_DEV_OFFSET); @@ -1161,6 +1168,7 @@ int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave, if (err) return err; + disable_unsupported_roce_caps(outbox->buf); /* add port mng change event capability and disable mw type 1 * unconditionally to slaves */ @@ -1258,6 +1266,21 @@ int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave, return 0; } +static void disable_unsupported_roce_caps(void *buf) +{ + u32 flags; + + MLX4_GET(flags, buf, QUERY_DEV_CAP_EXT_FLAGS_OFFSET); + flags &= ~(1UL << 31); + MLX4_PUT(buf, flags, QUERY_DEV_CAP_EXT_FLAGS_OFFSET); + MLX4_GET(flags, buf, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET); + flags &= ~(1UL << 24); + 
MLX4_PUT(buf, flags, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET); + MLX4_GET(flags, buf, QUERY_DEV_CAP_BMME_FLAGS_OFFSET); + flags &= ~(MLX4_FLAG_ROCE_V1_V2); + MLX4_PUT(buf, flags, QUERY_DEV_CAP_BMME_FLAGS_OFFSET); +} + int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr, struct mlx4_cmd_mailbox *inbox, @@ -2239,7 +2262,8 @@ struct mlx4_config_dev { __be32 rsvd1[3]; __be16 vxlan_udp_dport; __be16 rsvd2; - __be32 rsvd3; + __be16 roce_v2_entropy; + __be16 roce_v2_udp_dport; __be32 roce_flags; __be32 rsvd4[25]; __be16 rsvd5; @@ -2248,6 +2272,7 @@ struct mlx4_config_dev { }; #define MLX4_VXLAN_UDP_DPORT (1 << 0) +#define MLX4_ROCE_V2_UDP_DPORT BIT(3) #define MLX4_DISABLE_RX_PORT BIT(18) static int mlx4_CONFIG_DEV_set(struct mlx4_dev *dev, struct mlx4_config_dev *config_dev) @@ -2365,6 +2390,18 @@ int mlx4_disable_rx_port_check(struct mlx4_dev *dev, bool dis) return mlx4_CONFIG_DEV_set(dev, &config_dev); } +int mlx4_config_roce_v2_port(struct mlx4_dev *dev, u16 udp_port) +{ + struct mlx4_config_dev config_dev; + + memset(&config_dev, 0, sizeof(config_dev)); + config_dev.update_flags = cpu_to_be32(MLX4_ROCE_V2_UDP_DPORT); + config_dev.roce_v2_udp_dport = cpu_to_be16(udp_port); + + return mlx4_CONFIG_DEV_set(dev, &config_dev); +} +EXPORT_SYMBOL_GPL(mlx4_config_roce_v2_port); + int mlx4_virt2phy_port_map(struct mlx4_dev *dev, u32 port1, u32 port2) { struct mlx4_cmd_mailbox *mailbox; diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h index 2404c22ad2b2..7baef52db6b7 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h @@ -780,7 +780,10 @@ struct mlx4_set_port_general_context { u16 reserved1; u8 v_ignore_fcs; u8 flags; - u8 ignore_fcs; + union { + u8 ignore_fcs; + u8 roce_mode; + }; u8 reserved2; __be16 mtu; u8 pptx; diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c index f2550425c251..787b7bb54d52 100644 --- a/drivers/net/ethernet/mellanox/mlx4/port.c +++ b/drivers/net/ethernet/mellanox/mlx4/port.c @@ -1520,6 +1520,8 @@ int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port, int pkey_tbl_sz) return err; } +#define SET_PORT_ROCE_2_FLAGS 0x10 +#define MLX4_SET_PORT_ROCE_V1_V2 0x2 int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu, u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx) { @@ -1539,6 +1541,11 @@ int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu, context->pprx = (pprx * (!pfcrx)) << 7; context->pfcrx = pfcrx; + if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2) { + context->flags |= SET_PORT_ROCE_2_FLAGS; + context->roce_mode |= + MLX4_SET_PORT_ROCE_V1_V2 << 4; + } in_mod = MLX4_SET_PORT_GENERAL << 8 | port; err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B, diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c index 168823dde79f..d1cd9c32a9ae 100644 --- a/drivers/net/ethernet/mellanox/mlx4/qp.c +++ b/drivers/net/ethernet/mellanox/mlx4/qp.c @@ -167,6 +167,12 @@ static int __mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt, context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT; } + if ((cur_state == MLX4_QP_STATE_RTR) && + (new_state == MLX4_QP_STATE_RTS) && + dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2) + context->roce_entropy = + cpu_to_be16(mlx4_qp_roce_entropy(dev, qp->qpn)); + *(__be32 *) mailbox->buf = cpu_to_be32(optpar); memcpy(mailbox->buf + 8, context, sizeof *context); @@ -921,3 
+927,23 @@ int mlx4_qp_to_ready(struct mlx4_dev *dev, struct mlx4_mtt *mtt, return 0; } EXPORT_SYMBOL_GPL(mlx4_qp_to_ready); + +u16 mlx4_qp_roce_entropy(struct mlx4_dev *dev, u32 qpn) +{ + struct mlx4_qp_context context; + struct mlx4_qp qp; + int err; + + qp.qpn = qpn; + err = mlx4_qp_query(dev, &qp, &context); + if (!err) { + u32 dest_qpn = be32_to_cpu(context.remote_qpn) & 0xffffff; + u16 folded_dst = folded_qp(dest_qpn); + u16 folded_src = folded_qp(qpn); + + return (dest_qpn != qpn) ? + ((folded_dst ^ folded_src) | 0xC000) : + folded_src | 0xC000; + } + return 0xdead; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 9ea49a893323..aac071a7e830 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -39,8 +39,8 @@ #include <linux/mlx5/qp.h> #include <linux/mlx5/cq.h> #include <linux/mlx5/vport.h> +#include <linux/mlx5/transobj.h> #include "wq.h" -#include "transobj.h" #include "mlx5_core.h" #define MLX5E_MAX_NUM_TC 8 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 5c74a734f158..6a3e430f1062 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -752,7 +752,7 @@ static int mlx5e_create_cq(struct mlx5e_channel *c, struct mlx5_core_dev *mdev = priv->mdev; struct mlx5_core_cq *mcq = &cq->mcq; int eqn_not_used; - int irqn; + unsigned int irqn; int err; u32 i; @@ -806,7 +806,7 @@ static int mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param) void *in; void *cqc; int inlen; - int irqn_not_used; + unsigned int irqn_not_used; int eqn; int err; @@ -1517,7 +1517,7 @@ static int mlx5e_create_drop_cq(struct mlx5e_priv *priv, struct mlx5_core_dev *mdev = priv->mdev; struct mlx5_core_cq *mcq = &cq->mcq; int eqn_not_used; - int irqn; + unsigned int irqn; int err; err = mlx5_cqwq_create(mdev, ¶m->wq, param->cqc, &cq->wq, @@ -2241,7 +2241,7 @@ static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev) goto err_unmap_free_uar; } - err = mlx5_alloc_transport_domain(mdev, &priv->tdn); + err = mlx5_core_alloc_transport_domain(mdev, &priv->tdn); if (err) { mlx5_core_err(mdev, "alloc td failed, %d\n", err); goto err_dealloc_pd; @@ -2324,7 +2324,7 @@ err_destroy_mkey: mlx5_core_destroy_mkey(mdev, &priv->mr); err_dealloc_transport_domain: - mlx5_dealloc_transport_domain(mdev, priv->tdn); + mlx5_core_dealloc_transport_domain(mdev, priv->tdn); err_dealloc_pd: mlx5_core_dealloc_pd(mdev, priv->pdn); @@ -2356,7 +2356,7 @@ static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv) mlx5e_close_drop_rq(priv); mlx5e_destroy_tises(priv); mlx5_core_destroy_mkey(priv->mdev, &priv->mr); - mlx5_dealloc_transport_domain(priv->mdev, priv->tdn); + mlx5_core_dealloc_transport_domain(priv->mdev, priv->tdn); mlx5_core_dealloc_pd(priv->mdev, priv->pdn); mlx5_unmap_free_uar(priv->mdev, &priv->cq_uar); free_netdev(netdev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index 23c244a7e5d7..647a3ca2c2a9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c @@ -230,6 +230,7 @@ static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq) case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR: case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR: rsn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff; + rsn |= (eqe->data.qp_srq.type << MLX5_USER_INDEX_LEN); 
mlx5_core_dbg(dev, "event %s(%d) arrived on resource 0x%x\n", eqe_type_str(eqe->type), eqe->type, rsn); mlx5_rsc_event(dev, rsn, eqe->type); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c index 5096f4f336bd..a9894d2e8e26 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c @@ -38,9 +38,28 @@ #include "fs_cmd.h" #include "mlx5_core.h" +int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft) +{ + u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)]; + u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)]; + + memset(in, 0, sizeof(in)); + + MLX5_SET(set_flow_table_root_in, in, opcode, + MLX5_CMD_OP_SET_FLOW_TABLE_ROOT); + MLX5_SET(set_flow_table_root_in, in, table_type, ft->type); + MLX5_SET(set_flow_table_root_in, in, table_id, ft->id); + + memset(out, 0, sizeof(out)); + return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, + sizeof(out)); +} + int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev, enum fs_flow_table_type type, unsigned int level, - unsigned int log_size, unsigned int *table_id) + unsigned int log_size, struct mlx5_flow_table + *next_ft, unsigned int *table_id) { u32 out[MLX5_ST_SZ_DW(create_flow_table_out)]; u32 in[MLX5_ST_SZ_DW(create_flow_table_in)]; @@ -51,6 +70,10 @@ int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev, MLX5_SET(create_flow_table_in, in, opcode, MLX5_CMD_OP_CREATE_FLOW_TABLE); + if (next_ft) { + MLX5_SET(create_flow_table_in, in, table_miss_mode, 1); + MLX5_SET(create_flow_table_in, in, table_miss_id, next_ft->id); + } MLX5_SET(create_flow_table_in, in, table_type, type); MLX5_SET(create_flow_table_in, in, level, level); MLX5_SET(create_flow_table_in, in, log_size, log_size); @@ -83,6 +106,33 @@ int mlx5_cmd_destroy_flow_table(struct mlx5_core_dev *dev, sizeof(out)); } +int mlx5_cmd_modify_flow_table(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft, + struct mlx5_flow_table *next_ft) +{ + u32 in[MLX5_ST_SZ_DW(modify_flow_table_in)]; + u32 out[MLX5_ST_SZ_DW(modify_flow_table_out)]; + + memset(in, 0, sizeof(in)); + memset(out, 0, sizeof(out)); + + MLX5_SET(modify_flow_table_in, in, opcode, + MLX5_CMD_OP_MODIFY_FLOW_TABLE); + MLX5_SET(modify_flow_table_in, in, table_type, ft->type); + MLX5_SET(modify_flow_table_in, in, table_id, ft->id); + MLX5_SET(modify_flow_table_in, in, modify_field_select, + MLX5_MODIFY_FLOW_TABLE_MISS_TABLE_ID); + if (next_ft) { + MLX5_SET(modify_flow_table_in, in, table_miss_mode, 1); + MLX5_SET(modify_flow_table_in, in, table_miss_id, next_ft->id); + } else { + MLX5_SET(modify_flow_table_in, in, table_miss_mode, 0); + } + + return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, + sizeof(out)); +} + int mlx5_cmd_create_flow_group(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft, u32 *in, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h index f39304ede186..9814d4784803 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h @@ -35,11 +35,16 @@ int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev, enum fs_flow_table_type type, unsigned int level, - unsigned int log_size, unsigned int *table_id); + unsigned int log_size, struct mlx5_flow_table + *next_ft, unsigned int *table_id); int mlx5_cmd_destroy_flow_table(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft); +int mlx5_cmd_modify_flow_table(struct mlx5_core_dev *dev, + struct 
mlx5_flow_table *ft, + struct mlx5_flow_table *next_ft); + int mlx5_cmd_create_flow_group(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft, u32 *in, unsigned int *group_id); @@ -62,4 +67,6 @@ int mlx5_cmd_delete_fte(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft, unsigned int index); +int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft); #endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index f7d62fe595f6..6f68dba8d7ed 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -40,51 +40,83 @@ #define INIT_TREE_NODE_ARRAY_SIZE(...) (sizeof((struct init_tree_node[]){__VA_ARGS__}) /\ sizeof(struct init_tree_node)) -#define INIT_PRIO(min_level_val, max_ft_val,\ - start_level_val, ...) {.type = FS_TYPE_PRIO,\ +#define ADD_PRIO(num_prios_val, min_level_val, max_ft_val, caps_val,\ + ...) {.type = FS_TYPE_PRIO,\ .min_ft_level = min_level_val,\ - .start_level = start_level_val,\ .max_ft = max_ft_val,\ + .num_leaf_prios = num_prios_val,\ + .caps = caps_val,\ .children = (struct init_tree_node[]) {__VA_ARGS__},\ .ar_size = INIT_TREE_NODE_ARRAY_SIZE(__VA_ARGS__) \ } -#define ADD_PRIO(min_level_val, max_ft_val, start_level_val, ...)\ - INIT_PRIO(min_level_val, max_ft_val, start_level_val,\ - __VA_ARGS__)\ - -#define ADD_FT_PRIO(max_ft_val, start_level_val, ...)\ - INIT_PRIO(0, max_ft_val, start_level_val,\ - __VA_ARGS__)\ +#define ADD_MULTIPLE_PRIO(num_prios_val, max_ft_val, ...)\ + ADD_PRIO(num_prios_val, 0, max_ft_val, {},\ + __VA_ARGS__)\ #define ADD_NS(...) {.type = FS_TYPE_NAMESPACE,\ .children = (struct init_tree_node[]) {__VA_ARGS__},\ .ar_size = INIT_TREE_NODE_ARRAY_SIZE(__VA_ARGS__) \ } -#define KERNEL_START_LEVEL 0 -#define KERNEL_P0_START_LEVEL KERNEL_START_LEVEL +#define INIT_CAPS_ARRAY_SIZE(...) (sizeof((long[]){__VA_ARGS__}) /\ + sizeof(long)) + +#define FS_CAP(cap) (__mlx5_bit_off(flow_table_nic_cap, cap)) + +#define FS_REQUIRED_CAPS(...) 
{.arr_sz = INIT_CAPS_ARRAY_SIZE(__VA_ARGS__), \ + .caps = (long[]) {__VA_ARGS__} } + +#define LEFTOVERS_MAX_FT 1 +#define LEFTOVERS_NUM_PRIOS 1 +#define BY_PASS_PRIO_MAX_FT 1 +#define BY_PASS_MIN_LEVEL (KENREL_MIN_LEVEL + MLX5_BY_PASS_NUM_PRIOS +\ + LEFTOVERS_MAX_FT) + #define KERNEL_MAX_FT 2 +#define KERNEL_NUM_PRIOS 1 #define KENREL_MIN_LEVEL 2 + +struct node_caps { + size_t arr_sz; + long *caps; +}; static struct init_tree_node { enum fs_node_type type; struct init_tree_node *children; int ar_size; + struct node_caps caps; int min_ft_level; + int num_leaf_prios; int prio; int max_ft; - int start_level; } root_fs = { .type = FS_TYPE_NAMESPACE, - .ar_size = 1, + .ar_size = 3, .children = (struct init_tree_node[]) { - ADD_PRIO(KENREL_MIN_LEVEL, KERNEL_MAX_FT, - KERNEL_START_LEVEL, - ADD_NS(ADD_FT_PRIO(KERNEL_MAX_FT, - KERNEL_P0_START_LEVEL))), + ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0, + FS_REQUIRED_CAPS(FS_CAP(flow_table_properties_nic_receive.flow_modify_en), + FS_CAP(flow_table_properties_nic_receive.modify_root), + FS_CAP(flow_table_properties_nic_receive.identified_miss_table_mode), + FS_CAP(flow_table_properties_nic_receive.flow_table_modify)), + ADD_NS(ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS, BY_PASS_PRIO_MAX_FT))), + ADD_PRIO(0, KENREL_MIN_LEVEL, 0, {}, + ADD_NS(ADD_MULTIPLE_PRIO(KERNEL_NUM_PRIOS, KERNEL_MAX_FT))), + ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0, + FS_REQUIRED_CAPS(FS_CAP(flow_table_properties_nic_receive.flow_modify_en), + FS_CAP(flow_table_properties_nic_receive.modify_root), + FS_CAP(flow_table_properties_nic_receive.identified_miss_table_mode), + FS_CAP(flow_table_properties_nic_receive.flow_table_modify)), + ADD_NS(ADD_MULTIPLE_PRIO(LEFTOVERS_NUM_PRIOS, LEFTOVERS_MAX_FT))), } }; +enum fs_i_mutex_lock_class { + FS_MUTEX_GRANDPARENT, + FS_MUTEX_PARENT, + FS_MUTEX_CHILD +}; + static void del_rule(struct fs_node *node); static void del_flow_table(struct fs_node *node); static void del_flow_group(struct fs_node *node); @@ -119,10 +151,11 @@ static void tree_get_node(struct fs_node *node) atomic_inc(&node->refcount); } -static void nested_lock_ref_node(struct fs_node *node) +static void nested_lock_ref_node(struct fs_node *node, + enum fs_i_mutex_lock_class class) { if (node) { - mutex_lock_nested(&node->lock, SINGLE_DEPTH_NESTING); + mutex_lock_nested(&node->lock, class); atomic_inc(&node->refcount); } } @@ -436,10 +469,162 @@ static struct mlx5_flow_table *alloc_flow_table(int level, int max_fte, return ft; } +/* If reverse is false, then we search for the first flow table in the + * root sub-tree from start(closest from right), else we search for the + * last flow table in the root sub-tree till start(closest from left). + */ +static struct mlx5_flow_table *find_closest_ft_recursive(struct fs_node *root, + struct list_head *start, + bool reverse) +{ +#define list_advance_entry(pos, reverse) \ + ((reverse) ? 
list_prev_entry(pos, list) : list_next_entry(pos, list)) + +#define list_for_each_advance_continue(pos, head, reverse) \ + for (pos = list_advance_entry(pos, reverse); \ + &pos->list != (head); \ + pos = list_advance_entry(pos, reverse)) + + struct fs_node *iter = list_entry(start, struct fs_node, list); + struct mlx5_flow_table *ft = NULL; + + if (!root) + return NULL; + + list_for_each_advance_continue(iter, &root->children, reverse) { + if (iter->type == FS_TYPE_FLOW_TABLE) { + fs_get_obj(ft, iter); + return ft; + } + ft = find_closest_ft_recursive(iter, &iter->children, reverse); + if (ft) + return ft; + } + + return ft; +} + +/* If reverse is false, then return the first flow table in next priority of + * prio in the tree, else return the last flow table in the previous priority + * of prio in the tree. + */ +static struct mlx5_flow_table *find_closest_ft(struct fs_prio *prio, bool reverse) +{ + struct mlx5_flow_table *ft = NULL; + struct fs_node *curr_node; + struct fs_node *parent; + + parent = prio->node.parent; + curr_node = &prio->node; + while (!ft && parent) { + ft = find_closest_ft_recursive(parent, &curr_node->list, reverse); + curr_node = parent; + parent = curr_node->parent; + } + return ft; +} + +/* Assuming all the tree is locked by mutex chain lock */ +static struct mlx5_flow_table *find_next_chained_ft(struct fs_prio *prio) +{ + return find_closest_ft(prio, false); +} + +/* Assuming all the tree is locked by mutex chain lock */ +static struct mlx5_flow_table *find_prev_chained_ft(struct fs_prio *prio) +{ + return find_closest_ft(prio, true); +} + +static int connect_fts_in_prio(struct mlx5_core_dev *dev, + struct fs_prio *prio, + struct mlx5_flow_table *ft) +{ + struct mlx5_flow_table *iter; + int i = 0; + int err; + + fs_for_each_ft(iter, prio) { + i++; + err = mlx5_cmd_modify_flow_table(dev, + iter, + ft); + if (err) { + mlx5_core_warn(dev, "Failed to modify flow table %d\n", + iter->id); + /* The driver is out of sync with the FW */ + if (i > 1) + WARN_ON(true); + return err; + } + } + return 0; +} + +/* Connect flow tables from previous priority of prio to ft */ +static int connect_prev_fts(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft, + struct fs_prio *prio) +{ + struct mlx5_flow_table *prev_ft; + + prev_ft = find_prev_chained_ft(prio); + if (prev_ft) { + struct fs_prio *prev_prio; + + fs_get_obj(prev_prio, prev_ft->node.parent); + return connect_fts_in_prio(dev, prev_prio, ft); + } + return 0; +} + +static int update_root_ft_create(struct mlx5_flow_table *ft, struct fs_prio + *prio) +{ + struct mlx5_flow_root_namespace *root = find_root(&prio->node); + int min_level = INT_MAX; + int err; + + if (root->root_ft) + min_level = root->root_ft->level; + + if (ft->level >= min_level) + return 0; + + err = mlx5_cmd_update_root_ft(root->dev, ft); + if (err) + mlx5_core_warn(root->dev, "Update root flow table of id=%u failed\n", + ft->id); + else + root->root_ft = ft; + + return err; +} + +static int connect_flow_table(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft, + struct fs_prio *prio) +{ + int err = 0; + + /* Connect_prev_fts and update_root_ft_create are mutually exclusive */ + + if (list_empty(&prio->node.children)) { + err = connect_prev_fts(dev, ft, prio); + if (err) + return err; + } + + if (MLX5_CAP_FLOWTABLE(dev, + flow_table_properties_nic_receive.modify_root)) + err = update_root_ft_create(ft, prio); + return err; +} + struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns, int prio, int max_fte) { + struct 
mlx5_flow_table *next_ft = NULL; struct mlx5_flow_table *ft; int err; int log_table_sz; @@ -452,14 +637,15 @@ struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns, return ERR_PTR(-ENODEV); } + mutex_lock(&root->chain_lock); fs_prio = find_prio(ns, prio); - if (!fs_prio) - return ERR_PTR(-EINVAL); - - lock_ref_node(&fs_prio->node); + if (!fs_prio) { + err = -EINVAL; + goto unlock_root; + } if (fs_prio->num_ft == fs_prio->max_ft) { err = -ENOSPC; - goto unlock_prio; + goto unlock_root; } ft = alloc_flow_table(find_next_free_level(fs_prio), @@ -467,32 +653,63 @@ struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns, root->table_type); if (!ft) { err = -ENOMEM; - goto unlock_prio; + goto unlock_root; } tree_init_node(&ft->node, 1, del_flow_table); log_table_sz = ilog2(ft->max_fte); + next_ft = find_next_chained_ft(fs_prio); err = mlx5_cmd_create_flow_table(root->dev, ft->type, ft->level, - log_table_sz, &ft->id); + log_table_sz, next_ft, &ft->id); if (err) goto free_ft; + err = connect_flow_table(root->dev, ft, fs_prio); + if (err) + goto destroy_ft; + lock_ref_node(&fs_prio->node); tree_add_node(&ft->node, &fs_prio->node); list_add_tail(&ft->node.list, &fs_prio->node.children); fs_prio->num_ft++; unlock_ref_node(&fs_prio->node); - + mutex_unlock(&root->chain_lock); return ft; - +destroy_ft: + mlx5_cmd_destroy_flow_table(root->dev, ft); free_ft: kfree(ft); -unlock_prio: - unlock_ref_node(&fs_prio->node); +unlock_root: + mutex_unlock(&root->chain_lock); return ERR_PTR(err); } -struct mlx5_flow_group *mlx5_create_flow_group(struct mlx5_flow_table *ft, - u32 *fg_in) +struct mlx5_flow_table *mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns, + int prio, + int num_flow_table_entries, + int max_num_groups) +{ + struct mlx5_flow_table *ft; + + if (max_num_groups > num_flow_table_entries) + return ERR_PTR(-EINVAL); + + ft = mlx5_create_flow_table(ns, prio, num_flow_table_entries); + if (IS_ERR(ft)) + return ft; + + ft->autogroup.active = true; + ft->autogroup.required_groups = max_num_groups; + + return ft; +} +EXPORT_SYMBOL(mlx5_create_auto_grouped_flow_table); + +/* Flow table should be locked */ +static struct mlx5_flow_group *create_flow_group_common(struct mlx5_flow_table *ft, + u32 *fg_in, + struct list_head + *prev_fg, + bool is_auto_fg) { struct mlx5_flow_group *fg; struct mlx5_core_dev *dev = get_dev(&ft->node); @@ -505,18 +722,33 @@ struct mlx5_flow_group *mlx5_create_flow_group(struct mlx5_flow_table *ft, if (IS_ERR(fg)) return fg; - lock_ref_node(&ft->node); err = mlx5_cmd_create_flow_group(dev, ft, fg_in, &fg->id); if (err) { kfree(fg); - unlock_ref_node(&ft->node); return ERR_PTR(err); } + + if (ft->autogroup.active) + ft->autogroup.num_groups++; /* Add node to tree */ - tree_init_node(&fg->node, 1, del_flow_group); + tree_init_node(&fg->node, !is_auto_fg, del_flow_group); tree_add_node(&fg->node, &ft->node); /* Add node to group list */ list_add(&fg->node.list, ft->node.children.prev); + + return fg; +} + +struct mlx5_flow_group *mlx5_create_flow_group(struct mlx5_flow_table *ft, + u32 *fg_in) +{ + struct mlx5_flow_group *fg; + + if (ft->autogroup.active) + return ERR_PTR(-EPERM); + + lock_ref_node(&ft->node); + fg = create_flow_group_common(ft, fg_in, &ft->node.children, false); unlock_ref_node(&ft->node); return fg; @@ -614,7 +846,63 @@ static struct fs_fte *create_fte(struct mlx5_flow_group *fg, return fte; } -/* Assuming parent fg(flow table) is locked */ +static struct mlx5_flow_group *create_autogroup(struct 
mlx5_flow_table *ft, + u8 match_criteria_enable, + u32 *match_criteria) +{ + int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); + struct list_head *prev = &ft->node.children; + unsigned int candidate_index = 0; + struct mlx5_flow_group *fg; + void *match_criteria_addr; + unsigned int group_size = 0; + u32 *in; + + if (!ft->autogroup.active) + return ERR_PTR(-ENOENT); + + in = mlx5_vzalloc(inlen); + if (!in) + return ERR_PTR(-ENOMEM); + + if (ft->autogroup.num_groups < ft->autogroup.required_groups) + /* We save place for flow groups in addition to max types */ + group_size = ft->max_fte / (ft->autogroup.required_groups + 1); + + /* ft->max_fte == ft->autogroup.max_types */ + if (group_size == 0) + group_size = 1; + + /* sorted by start_index */ + fs_for_each_fg(fg, ft) { + if (candidate_index + group_size > fg->start_index) + candidate_index = fg->start_index + fg->max_ftes; + else + break; + prev = &fg->node.list; + } + + if (candidate_index + group_size > ft->max_fte) { + fg = ERR_PTR(-ENOSPC); + goto out; + } + + MLX5_SET(create_flow_group_in, in, match_criteria_enable, + match_criteria_enable); + MLX5_SET(create_flow_group_in, in, start_flow_index, candidate_index); + MLX5_SET(create_flow_group_in, in, end_flow_index, candidate_index + + group_size - 1); + match_criteria_addr = MLX5_ADDR_OF(create_flow_group_in, + in, match_criteria); + memcpy(match_criteria_addr, match_criteria, + MLX5_ST_SZ_BYTES(fte_match_param)); + + fg = create_flow_group_common(ft, in, prev, true); +out: + kvfree(in); + return fg; +} + static struct mlx5_flow_rule *add_rule_fg(struct mlx5_flow_group *fg, u32 *match_value, u8 action, @@ -626,9 +914,9 @@ static struct mlx5_flow_rule *add_rule_fg(struct mlx5_flow_group *fg, struct mlx5_flow_table *ft; struct list_head *prev; - lock_ref_node(&fg->node); + nested_lock_ref_node(&fg->node, FS_MUTEX_PARENT); fs_for_each_fte(fte, fg) { - nested_lock_ref_node(&fte->node); + nested_lock_ref_node(&fte->node, FS_MUTEX_CHILD); if (compare_match_value(&fg->mask, match_value, &fte->val) && action == fte->action && flow_tag == fte->flow_tag) { rule = add_rule_fte(fte, fg, dest); @@ -669,6 +957,33 @@ unlock_fg: return rule; } +static struct mlx5_flow_rule *add_rule_to_auto_fg(struct mlx5_flow_table *ft, + u8 match_criteria_enable, + u32 *match_criteria, + u32 *match_value, + u8 action, + u32 flow_tag, + struct mlx5_flow_destination *dest) +{ + struct mlx5_flow_rule *rule; + struct mlx5_flow_group *g; + + g = create_autogroup(ft, match_criteria_enable, match_criteria); + if (IS_ERR(g)) + return (void *)g; + + rule = add_rule_fg(g, match_value, + action, flow_tag, dest); + if (IS_ERR(rule)) { + /* Remove assumes refcount > 0 and autogroup creates a group + * with a refcount = 0. 
+ */ + tree_get_node(&g->node); + tree_remove_node(&g->node); + } + return rule; +} + struct mlx5_flow_rule * mlx5_add_flow_rule(struct mlx5_flow_table *ft, u8 match_criteria_enable, @@ -679,39 +994,115 @@ mlx5_add_flow_rule(struct mlx5_flow_table *ft, struct mlx5_flow_destination *dest) { struct mlx5_flow_group *g; - struct mlx5_flow_rule *rule = ERR_PTR(-EINVAL); + struct mlx5_flow_rule *rule; - tree_get_node(&ft->node); - lock_ref_node(&ft->node); + nested_lock_ref_node(&ft->node, FS_MUTEX_GRANDPARENT); fs_for_each_fg(g, ft) if (compare_match_criteria(g->mask.match_criteria_enable, match_criteria_enable, g->mask.match_criteria, match_criteria)) { - unlock_ref_node(&ft->node); rule = add_rule_fg(g, match_value, action, flow_tag, dest); - goto put; + if (!IS_ERR(rule) || PTR_ERR(rule) != -ENOSPC) + goto unlock; } + + rule = add_rule_to_auto_fg(ft, match_criteria_enable, match_criteria, + match_value, action, flow_tag, dest); +unlock: unlock_ref_node(&ft->node); -put: - tree_put_node(&ft->node); return rule; } +EXPORT_SYMBOL(mlx5_add_flow_rule); void mlx5_del_flow_rule(struct mlx5_flow_rule *rule) { tree_remove_node(&rule->node); } +EXPORT_SYMBOL(mlx5_del_flow_rule); + +/* Assuming prio->node.children(flow tables) is sorted by level */ +static struct mlx5_flow_table *find_next_ft(struct mlx5_flow_table *ft) +{ + struct fs_prio *prio; + + fs_get_obj(prio, ft->node.parent); + + if (!list_is_last(&ft->node.list, &prio->node.children)) + return list_next_entry(ft, node.list); + return find_next_chained_ft(prio); +} + +static int update_root_ft_destroy(struct mlx5_flow_table *ft) +{ + struct mlx5_flow_root_namespace *root = find_root(&ft->node); + struct mlx5_flow_table *new_root_ft = NULL; + + if (root->root_ft != ft) + return 0; + + new_root_ft = find_next_ft(ft); + if (new_root_ft) { + int err = mlx5_cmd_update_root_ft(root->dev, new_root_ft); + + if (err) { + mlx5_core_warn(root->dev, "Update root flow table of id=%u failed\n", + ft->id); + return err; + } + root->root_ft = new_root_ft; + } + return 0; +} + +/* Connect flow table from previous priority to + * the next flow table. 
+ */ +static int disconnect_flow_table(struct mlx5_flow_table *ft) +{ + struct mlx5_core_dev *dev = get_dev(&ft->node); + struct mlx5_flow_table *next_ft; + struct fs_prio *prio; + int err = 0; + + err = update_root_ft_destroy(ft); + if (err) + return err; + + fs_get_obj(prio, ft->node.parent); + if (!(list_first_entry(&prio->node.children, + struct mlx5_flow_table, + node.list) == ft)) + return 0; + + next_ft = find_next_chained_ft(prio); + err = connect_prev_fts(dev, next_ft, prio); + if (err) + mlx5_core_warn(dev, "Failed to disconnect flow table %d\n", + ft->id); + return err; +} int mlx5_destroy_flow_table(struct mlx5_flow_table *ft) { + struct mlx5_flow_root_namespace *root = find_root(&ft->node); + int err = 0; + + mutex_lock(&root->chain_lock); + err = disconnect_flow_table(ft); + if (err) { + mutex_unlock(&root->chain_lock); + return err; + } if (tree_remove_node(&ft->node)) mlx5_core_warn(get_dev(&ft->node), "Flow table %d wasn't destroyed, refcount > 1\n", ft->id); + mutex_unlock(&root->chain_lock); - return 0; + return err; } +EXPORT_SYMBOL(mlx5_destroy_flow_table); void mlx5_destroy_flow_group(struct mlx5_flow_group *fg) { @@ -732,8 +1123,10 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev, return NULL; switch (type) { + case MLX5_FLOW_NAMESPACE_BYPASS: case MLX5_FLOW_NAMESPACE_KERNEL: - prio = 0; + case MLX5_FLOW_NAMESPACE_LEFTOVERS: + prio = type; break; case MLX5_FLOW_NAMESPACE_FDB: if (dev->priv.fdb_root_ns) @@ -754,10 +1147,10 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev, return ns; } +EXPORT_SYMBOL(mlx5_get_flow_namespace); static struct fs_prio *fs_create_prio(struct mlx5_flow_namespace *ns, - unsigned prio, int max_ft, - int start_level) + unsigned prio, int max_ft) { struct fs_prio *fs_prio; @@ -770,7 +1163,6 @@ static struct fs_prio *fs_create_prio(struct mlx5_flow_namespace *ns, tree_add_node(&fs_prio->node, &ns->node); fs_prio->max_ft = max_ft; fs_prio->prio = prio; - fs_prio->start_level = start_level; list_add_tail(&fs_prio->node.list, &ns->node.children); return fs_prio; @@ -800,11 +1192,45 @@ static struct mlx5_flow_namespace *fs_create_namespace(struct fs_prio *prio) return ns; } -static int init_root_tree_recursive(int max_ft_level, struct init_tree_node *init_node, +static int create_leaf_prios(struct mlx5_flow_namespace *ns, struct init_tree_node + *prio_metadata) +{ + struct fs_prio *fs_prio; + int i; + + for (i = 0; i < prio_metadata->num_leaf_prios; i++) { + fs_prio = fs_create_prio(ns, i, prio_metadata->max_ft); + if (IS_ERR(fs_prio)) + return PTR_ERR(fs_prio); + } + return 0; +} + +#define FLOW_TABLE_BIT_SZ 1 +#define GET_FLOW_TABLE_CAP(dev, offset) \ + ((be32_to_cpu(*((__be32 *)(dev->hca_caps_cur[MLX5_CAP_FLOW_TABLE]) + \ + offset / 32)) >> \ + (32 - FLOW_TABLE_BIT_SZ - (offset & 0x1f))) & FLOW_TABLE_BIT_SZ) +static bool has_required_caps(struct mlx5_core_dev *dev, struct node_caps *caps) +{ + int i; + + for (i = 0; i < caps->arr_sz; i++) { + if (!GET_FLOW_TABLE_CAP(dev, caps->caps[i])) + return false; + } + return true; +} + +static int init_root_tree_recursive(struct mlx5_core_dev *dev, + struct init_tree_node *init_node, struct fs_node *fs_parent_node, struct init_tree_node *init_parent_node, int index) { + int max_ft_level = MLX5_CAP_FLOWTABLE(dev, + flow_table_properties_nic_receive. 
+ max_ft_level); struct mlx5_flow_namespace *fs_ns; struct fs_prio *fs_prio; struct fs_node *base; @@ -812,12 +1238,14 @@ static int init_root_tree_recursive(int max_ft_level, struct init_tree_node *ini int err; if (init_node->type == FS_TYPE_PRIO) { - if (init_node->min_ft_level > max_ft_level) - return -ENOTSUPP; + if ((init_node->min_ft_level > max_ft_level) || + !has_required_caps(dev, &init_node->caps)) + return 0; fs_get_obj(fs_ns, fs_parent_node); - fs_prio = fs_create_prio(fs_ns, index, init_node->max_ft, - init_node->start_level); + if (init_node->num_leaf_prios) + return create_leaf_prios(fs_ns, init_node); + fs_prio = fs_create_prio(fs_ns, index, init_node->max_ft); if (IS_ERR(fs_prio)) return PTR_ERR(fs_prio); base = &fs_prio->node; @@ -831,9 +1259,8 @@ static int init_root_tree_recursive(int max_ft_level, struct init_tree_node *ini return -EINVAL; } for (i = 0; i < init_node->ar_size; i++) { - err = init_root_tree_recursive(max_ft_level, - &init_node->children[i], base, - init_node, i); + err = init_root_tree_recursive(dev, &init_node->children[i], + base, init_node, i); if (err) return err; } @@ -841,7 +1268,8 @@ static int init_root_tree_recursive(int max_ft_level, struct init_tree_node *ini return 0; } -static int init_root_tree(int max_ft_level, struct init_tree_node *init_node, +static int init_root_tree(struct mlx5_core_dev *dev, + struct init_tree_node *init_node, struct fs_node *fs_parent_node) { int i; @@ -850,8 +1278,7 @@ static int init_root_tree(int max_ft_level, struct init_tree_node *init_node, fs_get_obj(fs_ns, fs_parent_node); for (i = 0; i < init_node->ar_size; i++) { - err = init_root_tree_recursive(max_ft_level, - &init_node->children[i], + err = init_root_tree_recursive(dev, &init_node->children[i], &fs_ns->node, init_node, i); if (err) @@ -877,25 +1304,65 @@ static struct mlx5_flow_root_namespace *create_root_ns(struct mlx5_core_dev *dev ns = &root_ns->ns; fs_init_namespace(ns); + mutex_init(&root_ns->chain_lock); tree_init_node(&ns->node, 1, NULL); tree_add_node(&ns->node, NULL); return root_ns; } +static void set_prio_attrs_in_prio(struct fs_prio *prio, int acc_level); + +static int set_prio_attrs_in_ns(struct mlx5_flow_namespace *ns, int acc_level) +{ + struct fs_prio *prio; + + fs_for_each_prio(prio, ns) { + /* This updates prio start_level and max_ft */ + set_prio_attrs_in_prio(prio, acc_level); + acc_level += prio->max_ft; + } + return acc_level; +} + +static void set_prio_attrs_in_prio(struct fs_prio *prio, int acc_level) +{ + struct mlx5_flow_namespace *ns; + int acc_level_ns = acc_level; + + prio->start_level = acc_level; + fs_for_each_ns(ns, prio) + /* This updates start_level and max_ft of ns's priority descendants */ + acc_level_ns = set_prio_attrs_in_ns(ns, acc_level); + if (!prio->max_ft) + prio->max_ft = acc_level_ns - prio->start_level; + WARN_ON(prio->max_ft < acc_level_ns - prio->start_level); +} + +static void set_prio_attrs(struct mlx5_flow_root_namespace *root_ns) +{ + struct mlx5_flow_namespace *ns = &root_ns->ns; + struct fs_prio *prio; + int start_level = 0; + + fs_for_each_prio(prio, ns) { + set_prio_attrs_in_prio(prio, start_level); + start_level += prio->max_ft; + } +} + static int init_root_ns(struct mlx5_core_dev *dev) { - int max_ft_level = MLX5_CAP_FLOWTABLE(dev, - flow_table_properties_nic_receive. 
- max_ft_level); dev->priv.root_ns = create_root_ns(dev, FS_FT_NIC_RX); if (IS_ERR_OR_NULL(dev->priv.root_ns)) goto cleanup; - if (init_root_tree(max_ft_level, &root_fs, &dev->priv.root_ns->ns.node)) + if (init_root_tree(dev, &root_fs, &dev->priv.root_ns->ns.node)) goto cleanup; + set_prio_attrs(dev->priv.root_ns); + return 0; cleanup: @@ -1019,7 +1486,7 @@ static int init_fdb_root_ns(struct mlx5_core_dev *dev) return -ENOMEM; /* Create single prio */ - prio = fs_create_prio(&dev->priv.fdb_root_ns->ns, 0, 1, 0); + prio = fs_create_prio(&dev->priv.fdb_root_ns->ns, 0, 1); if (IS_ERR(prio)) { cleanup_single_prio_root_ns(dev, dev->priv.fdb_root_ns); return PTR_ERR(prio); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h index 4ebb97fd5544..00245fd7e4bc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h @@ -77,6 +77,11 @@ struct mlx5_flow_table { unsigned int max_fte; unsigned int level; enum fs_flow_table_type type; + struct { + bool active; + unsigned int required_groups; + unsigned int num_groups; + } autogroup; }; /* Type of children is mlx5_flow_rule */ @@ -124,6 +129,9 @@ struct mlx5_flow_root_namespace { struct mlx5_flow_namespace ns; enum fs_flow_table_type table_type; struct mlx5_core_dev *dev; + struct mlx5_flow_table *root_ft; + /* Should be held when chaining flow tables */ + struct mutex chain_lock; }; int mlx5_init_fs(struct mlx5_core_dev *dev); @@ -143,6 +151,12 @@ void mlx5_cleanup_fs(struct mlx5_core_dev *dev); #define fs_for_each_prio(pos, ns) \ fs_list_for_each_entry(pos, &(ns)->node.children) +#define fs_for_each_ns(pos, prio) \ + fs_list_for_each_entry(pos, &(prio)->node.children) + +#define fs_for_each_ft(pos, prio) \ + fs_list_for_each_entry(pos, &(prio)->node.children) + #define fs_for_each_fg(pos, ft) \ fs_list_for_each_entry(pos, &(ft)->node.children) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 67676cf0d507..1545a944c309 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -78,6 +78,11 @@ struct mlx5_device_context { void *context; }; +enum { + MLX5_ATOMIC_REQ_MODE_BE = 0x0, + MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS = 0x1, +}; + static struct mlx5_profile profile[] = { [0] = { .mask = 0, @@ -387,7 +392,7 @@ query_ex: return err; } -static int set_caps(struct mlx5_core_dev *dev, void *in, int in_sz) +static int set_caps(struct mlx5_core_dev *dev, void *in, int in_sz, int opmod) { u32 out[MLX5_ST_SZ_DW(set_hca_cap_out)]; int err; @@ -395,6 +400,7 @@ static int set_caps(struct mlx5_core_dev *dev, void *in, int in_sz) memset(out, 0, sizeof(out)); MLX5_SET(set_hca_cap_in, in, opcode, MLX5_CMD_OP_SET_HCA_CAP); + MLX5_SET(set_hca_cap_in, in, op_mod, opmod << 1); err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out)); if (err) return err; @@ -404,6 +410,46 @@ static int set_caps(struct mlx5_core_dev *dev, void *in, int in_sz) return err; } +static int handle_hca_cap_atomic(struct mlx5_core_dev *dev) +{ + void *set_ctx; + void *set_hca_cap; + int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in); + int req_endianness; + int err; + + if (MLX5_CAP_GEN(dev, atomic)) { + err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC, + HCA_CAP_OPMOD_GET_CUR); + if (err) + return err; + } else { + return 0; + } + + req_endianness = + MLX5_CAP_ATOMIC(dev, + supported_atomic_req_8B_endianess_mode_1); + + if (req_endianness != 
MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS) + return 0; + + set_ctx = kzalloc(set_sz, GFP_KERNEL); + if (!set_ctx) + return -ENOMEM; + + set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, capability); + + /* Set requestor to host endianness */ + MLX5_SET(atomic_caps, set_hca_cap, atomic_req_8B_endianess_mode, + MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS); + + err = set_caps(dev, set_ctx, set_sz, MLX5_SET_HCA_CAP_OP_MOD_ATOMIC); + + kfree(set_ctx); + return err; +} + static int handle_hca_cap(struct mlx5_core_dev *dev) { void *set_ctx = NULL; @@ -445,7 +491,8 @@ static int handle_hca_cap(struct mlx5_core_dev *dev) MLX5_SET(cmd_hca_cap, set_hca_cap, log_uar_page_sz, PAGE_SHIFT - 12); - err = set_caps(dev, set_ctx, set_sz); + err = set_caps(dev, set_ctx, set_sz, + MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE); query_ex: kfree(set_ctx); @@ -585,7 +632,8 @@ static void mlx5_irq_clear_affinity_hints(struct mlx5_core_dev *mdev) mlx5_irq_clear_affinity_hint(mdev, i); } -int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn, int *irqn) +int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn, + unsigned int *irqn) { struct mlx5_eq_table *table = &dev->priv.eq_table; struct mlx5_eq *eq, *n; @@ -666,7 +714,6 @@ clean: return err; } -#ifdef CONFIG_MLX5_CORE_EN static int mlx5_core_set_issi(struct mlx5_core_dev *dev) { u32 query_in[MLX5_ST_SZ_DW(query_issi_in)]; @@ -719,7 +766,6 @@ static int mlx5_core_set_issi(struct mlx5_core_dev *dev) return -ENOTSUPP; } -#endif static int map_bf_area(struct mlx5_core_dev *dev) { @@ -965,13 +1011,11 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv) goto err_pagealloc_cleanup; } -#ifdef CONFIG_MLX5_CORE_EN err = mlx5_core_set_issi(dev); if (err) { dev_err(&pdev->dev, "failed to set issi\n"); goto err_disable_hca; } -#endif err = mlx5_satisfy_startup_pages(dev, 1); if (err) { @@ -991,6 +1035,12 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv) goto reclaim_boot_pages; } + err = handle_hca_cap_atomic(dev); + if (err) { + dev_err(&pdev->dev, "handle_hca_cap_atomic failed\n"); + goto reclaim_boot_pages; + } + err = mlx5_satisfy_startup_pages(dev, 0); if (err) { dev_err(&pdev->dev, "failed to allocate init pages\n"); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c index 30e2ba3f5f16..def289375ecb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c @@ -36,6 +36,7 @@ #include <linux/mlx5/cmd.h> #include <linux/mlx5/qp.h> #include <linux/mlx5/driver.h> +#include <linux/mlx5/transobj.h> #include "mlx5_core.h" @@ -67,6 +68,52 @@ void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common) complete(&common->free); } +static u64 qp_allowed_event_types(void) +{ + u64 mask; + + mask = BIT(MLX5_EVENT_TYPE_PATH_MIG) | + BIT(MLX5_EVENT_TYPE_COMM_EST) | + BIT(MLX5_EVENT_TYPE_SQ_DRAINED) | + BIT(MLX5_EVENT_TYPE_SRQ_LAST_WQE) | + BIT(MLX5_EVENT_TYPE_WQ_CATAS_ERROR) | + BIT(MLX5_EVENT_TYPE_PATH_MIG_FAILED) | + BIT(MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | + BIT(MLX5_EVENT_TYPE_WQ_ACCESS_ERROR); + + return mask; +} + +static u64 rq_allowed_event_types(void) +{ + u64 mask; + + mask = BIT(MLX5_EVENT_TYPE_SRQ_LAST_WQE) | + BIT(MLX5_EVENT_TYPE_WQ_CATAS_ERROR); + + return mask; +} + +static u64 sq_allowed_event_types(void) +{ + return BIT(MLX5_EVENT_TYPE_WQ_CATAS_ERROR); +} + +static bool is_event_type_allowed(int rsc_type, int event_type) +{ + switch (rsc_type) { + case MLX5_EVENT_QUEUE_TYPE_QP: + return BIT(event_type) 
& qp_allowed_event_types(); + case MLX5_EVENT_QUEUE_TYPE_RQ: + return BIT(event_type) & rq_allowed_event_types(); + case MLX5_EVENT_QUEUE_TYPE_SQ: + return BIT(event_type) & sq_allowed_event_types(); + default: + WARN(1, "Event arrived for unknown resource type"); + return false; + } +} + void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type) { struct mlx5_core_rsc_common *common = mlx5_get_rsc(dev, rsn); @@ -75,8 +122,16 @@ void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type) if (!common) return; + if (!is_event_type_allowed((rsn >> MLX5_USER_INDEX_LEN), event_type)) { + mlx5_core_warn(dev, "event 0x%.2x is not allowed on resource 0x%.8x\n", + event_type, rsn); + return; + } + switch (common->res) { case MLX5_RES_QP: + case MLX5_RES_RQ: + case MLX5_RES_SQ: qp = (struct mlx5_core_qp *)common; qp->event(qp, event_type); break; @@ -177,27 +232,56 @@ void mlx5_eq_pagefault(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe) } #endif +static int create_qprqsq_common(struct mlx5_core_dev *dev, + struct mlx5_core_qp *qp, + int rsc_type) +{ + struct mlx5_qp_table *table = &dev->priv.qp_table; + int err; + + qp->common.res = rsc_type; + spin_lock_irq(&table->lock); + err = radix_tree_insert(&table->tree, + qp->qpn | (rsc_type << MLX5_USER_INDEX_LEN), + qp); + spin_unlock_irq(&table->lock); + if (err) + return err; + + atomic_set(&qp->common.refcount, 1); + init_completion(&qp->common.free); + qp->pid = current->pid; + + return 0; +} + +static void destroy_qprqsq_common(struct mlx5_core_dev *dev, + struct mlx5_core_qp *qp) +{ + struct mlx5_qp_table *table = &dev->priv.qp_table; + unsigned long flags; + + spin_lock_irqsave(&table->lock, flags); + radix_tree_delete(&table->tree, + qp->qpn | (qp->common.res << MLX5_USER_INDEX_LEN)); + spin_unlock_irqrestore(&table->lock, flags); + mlx5_core_put_rsc((struct mlx5_core_rsc_common *)qp); + wait_for_completion(&qp->common.free); +} + int mlx5_core_create_qp(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp, struct mlx5_create_qp_mbox_in *in, int inlen) { - struct mlx5_qp_table *table = &dev->priv.qp_table; struct mlx5_create_qp_mbox_out out; struct mlx5_destroy_qp_mbox_in din; struct mlx5_destroy_qp_mbox_out dout; int err; - void *qpc; memset(&out, 0, sizeof(out)); in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_QP); - if (dev->issi) { - qpc = MLX5_ADDR_OF(create_qp_in, in, qpc); - /* 0xffffff means we ask to work with cqe version 0 */ - MLX5_SET(qpc, qpc, user_index, 0xffffff); - } - err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out)); if (err) { mlx5_core_warn(dev, "ret %d\n", err); @@ -213,24 +297,16 @@ int mlx5_core_create_qp(struct mlx5_core_dev *dev, qp->qpn = be32_to_cpu(out.qpn) & 0xffffff; mlx5_core_dbg(dev, "qpn = 0x%x\n", qp->qpn); - qp->common.res = MLX5_RES_QP; - spin_lock_irq(&table->lock); - err = radix_tree_insert(&table->tree, qp->qpn, qp); - spin_unlock_irq(&table->lock); - if (err) { - mlx5_core_warn(dev, "err %d\n", err); + err = create_qprqsq_common(dev, qp, MLX5_RES_QP); + if (err) goto err_cmd; - } err = mlx5_debug_qp_add(dev, qp); if (err) mlx5_core_dbg(dev, "failed adding QP 0x%x to debug file system\n", qp->qpn); - qp->pid = current->pid; - atomic_set(&qp->common.refcount, 1); atomic_inc(&dev->num_qps); - init_completion(&qp->common.free); return 0; @@ -250,18 +326,11 @@ int mlx5_core_destroy_qp(struct mlx5_core_dev *dev, { struct mlx5_destroy_qp_mbox_in in; struct mlx5_destroy_qp_mbox_out out; - struct mlx5_qp_table *table = &dev->priv.qp_table; - unsigned long flags; int err; 
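The qp/rq/sq_allowed_event_types() helpers above reduce is_event_type_allowed() to a plain bitmask membership test: each queue type gets a mask built from BIT(event), and an incoming event is accepted iff its bit is set. A standalone illustration of that test, with made-up event numbers (the real MLX5_EVENT_TYPE_* values are not reproduced here):

#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1ULL << (n))

/* Illustrative event numbers only, not the hardware's. */
enum { EV_SRQ_LAST_WQE = 3, EV_WQ_CATAS_ERROR = 7, EV_COMM_EST = 12 };

/* Mirrors rq_allowed_event_types(): last WQE and catastrophic errors. */
static const uint64_t rq_mask = BIT(EV_SRQ_LAST_WQE) | BIT(EV_WQ_CATAS_ERROR);

int main(void)
{
	printf("catas error on RQ: %d\n", !!(rq_mask & BIT(EV_WQ_CATAS_ERROR)));	/* 1 */
	printf("comm established on RQ: %d\n", !!(rq_mask & BIT(EV_COMM_EST)));	/* 0 */
	return 0;
}
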
mlx5_debug_qp_remove(dev, qp); - spin_lock_irqsave(&table->lock, flags); - radix_tree_delete(&table->tree, qp->qpn); - spin_unlock_irqrestore(&table->lock, flags); - - mlx5_core_put_rsc((struct mlx5_core_rsc_common *)qp); - wait_for_completion(&qp->common.free); + destroy_qprqsq_common(dev, qp); memset(&in, 0, sizeof(in)); memset(&out, 0, sizeof(out)); @@ -279,59 +348,15 @@ int mlx5_core_destroy_qp(struct mlx5_core_dev *dev, } EXPORT_SYMBOL_GPL(mlx5_core_destroy_qp); -int mlx5_core_qp_modify(struct mlx5_core_dev *dev, enum mlx5_qp_state cur_state, - enum mlx5_qp_state new_state, +int mlx5_core_qp_modify(struct mlx5_core_dev *dev, u16 operation, struct mlx5_modify_qp_mbox_in *in, int sqd_event, struct mlx5_core_qp *qp) { - static const u16 optab[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE] = { - [MLX5_QP_STATE_RST] = { - [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP, - [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP, - [MLX5_QP_STATE_INIT] = MLX5_CMD_OP_RST2INIT_QP, - }, - [MLX5_QP_STATE_INIT] = { - [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP, - [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP, - [MLX5_QP_STATE_INIT] = MLX5_CMD_OP_INIT2INIT_QP, - [MLX5_QP_STATE_RTR] = MLX5_CMD_OP_INIT2RTR_QP, - }, - [MLX5_QP_STATE_RTR] = { - [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP, - [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP, - [MLX5_QP_STATE_RTS] = MLX5_CMD_OP_RTR2RTS_QP, - }, - [MLX5_QP_STATE_RTS] = { - [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP, - [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP, - [MLX5_QP_STATE_RTS] = MLX5_CMD_OP_RTS2RTS_QP, - }, - [MLX5_QP_STATE_SQD] = { - [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP, - [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP, - }, - [MLX5_QP_STATE_SQER] = { - [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP, - [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP, - [MLX5_QP_STATE_RTS] = MLX5_CMD_OP_SQERR2RTS_QP, - }, - [MLX5_QP_STATE_ERR] = { - [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP, - [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP, - } - }; - struct mlx5_modify_qp_mbox_out out; int err = 0; - u16 op; - - if (cur_state >= MLX5_QP_NUM_STATE || new_state >= MLX5_QP_NUM_STATE || - !optab[cur_state][new_state]) - return -EINVAL; memset(&out, 0, sizeof(out)); - op = optab[cur_state][new_state]; - in->hdr.opcode = cpu_to_be16(op); + in->hdr.opcode = cpu_to_be16(operation); in->qpn = cpu_to_be32(qp->qpn); err = mlx5_cmd_exec(dev, in, sizeof(*in), &out, sizeof(out)); if (err) @@ -449,3 +474,67 @@ int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 qpn, } EXPORT_SYMBOL_GPL(mlx5_core_page_fault_resume); #endif + +int mlx5_core_create_rq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen, + struct mlx5_core_qp *rq) +{ + int err; + u32 rqn; + + err = mlx5_core_create_rq(dev, in, inlen, &rqn); + if (err) + return err; + + rq->qpn = rqn; + err = create_qprqsq_common(dev, rq, MLX5_RES_RQ); + if (err) + goto err_destroy_rq; + + return 0; + +err_destroy_rq: + mlx5_core_destroy_rq(dev, rq->qpn); + + return err; +} +EXPORT_SYMBOL(mlx5_core_create_rq_tracked); + +void mlx5_core_destroy_rq_tracked(struct mlx5_core_dev *dev, + struct mlx5_core_qp *rq) +{ + destroy_qprqsq_common(dev, rq); + mlx5_core_destroy_rq(dev, rq->qpn); +} +EXPORT_SYMBOL(mlx5_core_destroy_rq_tracked); + +int mlx5_core_create_sq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen, + struct mlx5_core_qp *sq) +{ + int err; + u32 sqn; + + err = mlx5_core_create_sq(dev, in, inlen, &sqn); + if (err) + return err; + + sq->qpn = sqn; + err = create_qprqsq_common(dev, sq, MLX5_RES_SQ); + if (err) + goto err_destroy_sq; + + return 0; + +err_destroy_sq: + 
mlx5_core_destroy_sq(dev, sq->qpn); + + return err; +} +EXPORT_SYMBOL(mlx5_core_create_sq_tracked); + +void mlx5_core_destroy_sq_tracked(struct mlx5_core_dev *dev, + struct mlx5_core_qp *sq) +{ + destroy_qprqsq_common(dev, sq); + mlx5_core_destroy_sq(dev, sq->qpn); +} +EXPORT_SYMBOL(mlx5_core_destroy_sq_tracked); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/srq.c b/drivers/net/ethernet/mellanox/mlx5/core/srq.c index ffada801976b..04bc522605a0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/srq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/srq.c @@ -37,7 +37,7 @@ #include <linux/mlx5/srq.h> #include <rdma/ib_verbs.h> #include "mlx5_core.h" -#include "transobj.h" +#include <linux/mlx5/transobj.h> void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type) { @@ -241,8 +241,6 @@ static int create_xrc_srq_cmd(struct mlx5_core_dev *dev, memcpy(xrc_srqc, srqc, MLX5_ST_SZ_BYTES(srqc)); memcpy(pas, in->pas, pas_size); - /* 0xffffff means we ask to work with cqe version 0 */ - MLX5_SET(xrc_srqc, xrc_srqc, user_index, 0xffffff); MLX5_SET(create_xrc_srq_in, create_in, opcode, MLX5_CMD_OP_CREATE_XRC_SRQ); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c index d7068f54e800..03a5093ffeb7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c @@ -32,9 +32,9 @@ #include <linux/mlx5/driver.h> #include "mlx5_core.h" -#include "transobj.h" +#include <linux/mlx5/transobj.h> -int mlx5_alloc_transport_domain(struct mlx5_core_dev *dev, u32 *tdn) +int mlx5_core_alloc_transport_domain(struct mlx5_core_dev *dev, u32 *tdn) { u32 in[MLX5_ST_SZ_DW(alloc_transport_domain_in)]; u32 out[MLX5_ST_SZ_DW(alloc_transport_domain_out)]; @@ -53,8 +53,9 @@ int mlx5_alloc_transport_domain(struct mlx5_core_dev *dev, u32 *tdn) return err; } +EXPORT_SYMBOL(mlx5_core_alloc_transport_domain); -void mlx5_dealloc_transport_domain(struct mlx5_core_dev *dev, u32 tdn) +void mlx5_core_dealloc_transport_domain(struct mlx5_core_dev *dev, u32 tdn) { u32 in[MLX5_ST_SZ_DW(dealloc_transport_domain_in)]; u32 out[MLX5_ST_SZ_DW(dealloc_transport_domain_out)]; @@ -68,6 +69,7 @@ void mlx5_dealloc_transport_domain(struct mlx5_core_dev *dev, u32 tdn) mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out)); } +EXPORT_SYMBOL(mlx5_core_dealloc_transport_domain); int mlx5_core_create_rq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *rqn) { @@ -94,6 +96,7 @@ int mlx5_core_modify_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *in, int inlen) memset(out, 0, sizeof(out)); return mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out)); } +EXPORT_SYMBOL(mlx5_core_modify_rq); void mlx5_core_destroy_rq(struct mlx5_core_dev *dev, u32 rqn) { @@ -108,6 +111,18 @@ void mlx5_core_destroy_rq(struct mlx5_core_dev *dev, u32 rqn) mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out)); } +int mlx5_core_query_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *out) +{ + u32 in[MLX5_ST_SZ_DW(query_rq_in)] = {0}; + int outlen = MLX5_ST_SZ_BYTES(query_rq_out); + + MLX5_SET(query_rq_in, in, opcode, MLX5_CMD_OP_QUERY_RQ); + MLX5_SET(query_rq_in, in, rqn, rqn); + + return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, outlen); +} +EXPORT_SYMBOL(mlx5_core_query_rq); + int mlx5_core_create_sq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *sqn) { u32 out[MLX5_ST_SZ_DW(create_sq_out)]; @@ -133,6 +148,7 @@ int mlx5_core_modify_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *in, int inlen) memset(out, 0, 
sizeof(out)); return mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out)); } +EXPORT_SYMBOL(mlx5_core_modify_sq); void mlx5_core_destroy_sq(struct mlx5_core_dev *dev, u32 sqn) { @@ -147,6 +163,18 @@ void mlx5_core_destroy_sq(struct mlx5_core_dev *dev, u32 sqn) mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out)); } +int mlx5_core_query_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *out) +{ + u32 in[MLX5_ST_SZ_DW(query_sq_in)] = {0}; + int outlen = MLX5_ST_SZ_BYTES(query_sq_out); + + MLX5_SET(query_sq_in, in, opcode, MLX5_CMD_OP_QUERY_SQ); + MLX5_SET(query_sq_in, in, sqn, sqn); + + return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, outlen); +} +EXPORT_SYMBOL(mlx5_core_query_sq); + int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *tirn) { @@ -162,6 +190,7 @@ int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen, return err; } +EXPORT_SYMBOL(mlx5_core_create_tir); int mlx5_core_modify_tir(struct mlx5_core_dev *dev, u32 tirn, u32 *in, int inlen) @@ -187,6 +216,7 @@ void mlx5_core_destroy_tir(struct mlx5_core_dev *dev, u32 tirn) mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out)); } +EXPORT_SYMBOL(mlx5_core_destroy_tir); int mlx5_core_create_tis(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *tisn) @@ -203,6 +233,19 @@ int mlx5_core_create_tis(struct mlx5_core_dev *dev, u32 *in, int inlen, return err; } +EXPORT_SYMBOL(mlx5_core_create_tis); + +int mlx5_core_modify_tis(struct mlx5_core_dev *dev, u32 tisn, u32 *in, + int inlen) +{ + u32 out[MLX5_ST_SZ_DW(modify_tis_out)] = {0}; + + MLX5_SET(modify_tis_in, in, tisn, tisn); + MLX5_SET(modify_tis_in, in, opcode, MLX5_CMD_OP_MODIFY_TIS); + + return mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out)); +} +EXPORT_SYMBOL(mlx5_core_modify_tis); void mlx5_core_destroy_tis(struct mlx5_core_dev *dev, u32 tisn) { @@ -216,6 +259,7 @@ void mlx5_core_destroy_tis(struct mlx5_core_dev *dev, u32 tisn) mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out)); } +EXPORT_SYMBOL(mlx5_core_destroy_tis); int mlx5_core_create_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *rmpn) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.h b/drivers/net/ethernet/mellanox/mlx5/core/transobj.h deleted file mode 100644 index 74cae51436e4..000000000000 --- a/drivers/net/ethernet/mellanox/mlx5/core/transobj.h +++ /dev/null @@ -1,72 +0,0 @@ -/* - * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#ifndef __TRANSOBJ_H__ -#define __TRANSOBJ_H__ - -int mlx5_alloc_transport_domain(struct mlx5_core_dev *dev, u32 *tdn); -void mlx5_dealloc_transport_domain(struct mlx5_core_dev *dev, u32 tdn); -int mlx5_core_create_rq(struct mlx5_core_dev *dev, u32 *in, int inlen, - u32 *rqn); -int mlx5_core_modify_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *in, int inlen); -void mlx5_core_destroy_rq(struct mlx5_core_dev *dev, u32 rqn); -int mlx5_core_create_sq(struct mlx5_core_dev *dev, u32 *in, int inlen, - u32 *sqn); -int mlx5_core_modify_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *in, int inlen); -void mlx5_core_destroy_sq(struct mlx5_core_dev *dev, u32 sqn); -int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen, - u32 *tirn); -int mlx5_core_modify_tir(struct mlx5_core_dev *dev, u32 tirn, u32 *in, - int inlen); -void mlx5_core_destroy_tir(struct mlx5_core_dev *dev, u32 tirn); -int mlx5_core_create_tis(struct mlx5_core_dev *dev, u32 *in, int inlen, - u32 *tisn); -void mlx5_core_destroy_tis(struct mlx5_core_dev *dev, u32 tisn); -int mlx5_core_create_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen, - u32 *rmpn); -int mlx5_core_modify_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen); -int mlx5_core_destroy_rmp(struct mlx5_core_dev *dev, u32 rmpn); -int mlx5_core_query_rmp(struct mlx5_core_dev *dev, u32 rmpn, u32 *out); -int mlx5_core_arm_rmp(struct mlx5_core_dev *dev, u32 rmpn, u16 lwm); -int mlx5_core_create_xsrq(struct mlx5_core_dev *dev, u32 *in, int inlen, - u32 *rmpn); -int mlx5_core_destroy_xsrq(struct mlx5_core_dev *dev, u32 rmpn); -int mlx5_core_query_xsrq(struct mlx5_core_dev *dev, u32 rmpn, u32 *out); -int mlx5_core_arm_xsrq(struct mlx5_core_dev *dev, u32 rmpn, u16 lwm); - -int mlx5_core_create_rqt(struct mlx5_core_dev *dev, u32 *in, int inlen, - u32 *rqtn); -int mlx5_core_modify_rqt(struct mlx5_core_dev *dev, u32 rqtn, u32 *in, - int inlen); -void mlx5_core_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn); - -#endif /* __TRANSOBJ_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c index 076197efea9b..c7398b95aecd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c @@ -76,7 +76,7 @@ u8 mlx5_query_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport) return MLX5_GET(query_vport_state_out, out, admin_state); } -EXPORT_SYMBOL(mlx5_query_vport_admin_state); +EXPORT_SYMBOL_GPL(mlx5_query_vport_admin_state); int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport, u8 state) @@ -104,7 +104,7 @@ int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod, return err; } -EXPORT_SYMBOL(mlx5_modify_vport_admin_state); +EXPORT_SYMBOL_GPL(mlx5_modify_vport_admin_state); static int mlx5_query_nic_vport_context(struct mlx5_core_dev *mdev, u16 vport, u32 *out, int outlen) @@ -151,12 +151,9 @@ int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev, nic_vport_context.permanent_address); err = mlx5_query_nic_vport_context(mdev, vport, out, outlen); - if (err) - goto out; - - ether_addr_copy(addr, &out_addr[2]); + if (!err) + ether_addr_copy(addr, &out_addr[2]); -out: kvfree(out); return err; } @@ -197,7 +194,7 @@ int 
mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *mdev, return err; } -EXPORT_SYMBOL(mlx5_modify_nic_vport_mac_address); +EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mac_address); int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev, u32 vport, @@ -430,6 +427,68 @@ int mlx5_modify_nic_vport_vlans(struct mlx5_core_dev *dev, } EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_vlans); +int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev, + u64 *system_image_guid) +{ + u32 *out; + int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out); + + out = mlx5_vzalloc(outlen); + if (!out) + return -ENOMEM; + + mlx5_query_nic_vport_context(mdev, 0, out, outlen); + + *system_image_guid = MLX5_GET64(query_nic_vport_context_out, out, + nic_vport_context.system_image_guid); + + kfree(out); + + return 0; +} +EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_system_image_guid); + +int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid) +{ + u32 *out; + int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out); + + out = mlx5_vzalloc(outlen); + if (!out) + return -ENOMEM; + + mlx5_query_nic_vport_context(mdev, 0, out, outlen); + + *node_guid = MLX5_GET64(query_nic_vport_context_out, out, + nic_vport_context.node_guid); + + kfree(out); + + return 0; +} +EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_node_guid); + +int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev, + u16 *qkey_viol_cntr) +{ + u32 *out; + int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out); + + out = mlx5_vzalloc(outlen); + if (!out) + return -ENOMEM; + + mlx5_query_nic_vport_context(mdev, 0, out, outlen); + + *qkey_viol_cntr = MLX5_GET(query_nic_vport_context_out, out, + nic_vport_context.qkey_violation_counter); + + kfree(out); + + return 0; +} +EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_qkey_viol_cntr); + int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 other_vport, u8 port_num, u16 vf_num, u16 gid_index, union ib_gid *gid) @@ -750,3 +809,44 @@ int mlx5_modify_nic_vport_promisc(struct mlx5_core_dev *mdev, return err; } EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_promisc); + +enum mlx5_vport_roce_state { + MLX5_VPORT_ROCE_DISABLED = 0, + MLX5_VPORT_ROCE_ENABLED = 1, +}; + +static int mlx5_nic_vport_update_roce_state(struct mlx5_core_dev *mdev, + enum mlx5_vport_roce_state state) +{ + void *in; + int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in); + int err; + + in = mlx5_vzalloc(inlen); + if (!in) { + mlx5_core_warn(mdev, "failed to allocate inbox\n"); + return -ENOMEM; + } + + MLX5_SET(modify_nic_vport_context_in, in, field_select.roce_en, 1); + MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.roce_en, + state); + + err = mlx5_modify_nic_vport_context(mdev, in, inlen); + + kvfree(in); + + return err; +} + +int mlx5_nic_vport_enable_roce(struct mlx5_core_dev *mdev) +{ + return mlx5_nic_vport_update_roce_state(mdev, MLX5_VPORT_ROCE_ENABLED); +} +EXPORT_SYMBOL_GPL(mlx5_nic_vport_enable_roce); + +int mlx5_nic_vport_disable_roce(struct mlx5_core_dev *mdev) +{ + return mlx5_nic_vport_update_roce_state(mdev, MLX5_VPORT_ROCE_DISABLED); +} +EXPORT_SYMBOL_GPL(mlx5_nic_vport_disable_roce); diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c index 5b9364f4837d..1ac8bf187168 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c @@ -130,7 +130,7 @@ static ssize_t mlxsw_hwmon_temp_rst_store(struct device *dev, dev_err(mlxsw_hwmon->bus_info->dev, "Failed to reset temp 
sensor history\n"); return err; } - return err ? err : len; + return len; } static ssize_t mlxsw_hwmon_fan_rpm_show(struct device *dev, diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 66d851d4dfb4..bb77e2207804 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -99,6 +99,55 @@ static const struct mlxsw_reg_info mlxsw_reg_spad = { */ MLXSW_ITEM_BUF(reg, spad, base_mac, 0x02, 6); +/* SMID - Switch Multicast ID + * -------------------------- + * The MID record maps from a MID (Multicast ID), which is a unique identifier + * of the multicast group within the stacking domain, into a list of local + * ports into which the packet is replicated. + */ +#define MLXSW_REG_SMID_ID 0x2007 +#define MLXSW_REG_SMID_LEN 0x240 + +static const struct mlxsw_reg_info mlxsw_reg_smid = { + .id = MLXSW_REG_SMID_ID, + .len = MLXSW_REG_SMID_LEN, +}; + +/* reg_smid_swid + * Switch partition ID. + * Access: Index + */ +MLXSW_ITEM32(reg, smid, swid, 0x00, 24, 8); + +/* reg_smid_mid + * Multicast identifier - global identifier that represents the multicast group + * across all devices. + * Access: Index + */ +MLXSW_ITEM32(reg, smid, mid, 0x00, 0, 16); + +/* reg_smid_port + * Local port membership (1 bit per port). + * Access: RW + */ +MLXSW_ITEM_BIT_ARRAY(reg, smid, port, 0x20, 0x20, 1); + +/* reg_smid_port_mask + * Local port mask (1 bit per port). + * Access: W + */ +MLXSW_ITEM_BIT_ARRAY(reg, smid, port_mask, 0x220, 0x20, 1); + +static inline void mlxsw_reg_smid_pack(char *payload, u16 mid, + u8 port, bool set) +{ + MLXSW_REG_ZERO(smid, payload); + mlxsw_reg_smid_swid_set(payload, 0); + mlxsw_reg_smid_mid_set(payload, mid); + mlxsw_reg_smid_port_set(payload, port, set); + mlxsw_reg_smid_port_mask_set(payload, port, 1); +} + /* SSPR - Switch System Port Record Register * ----------------------------------------- * Configures the system port to local port mapping.
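The mlxsw_reg_smid_pack() helper above follows the driver's usual pack-then-write convention. A minimal usage sketch, assuming a struct mlxsw_sp with a valid ->core handle as used elsewhere in this patch; the wrapper name and the MID/port numbers are purely illustrative:

static int smid_example_add_port(struct mlxsw_sp *mlxsw_sp)
{
	char *smid_pl;
	int err;

	smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL);
	if (!smid_pl)
		return -ENOMEM;

	/* Add local port 3 to the replication list of MID 42; the pack
	 * helper also sets port 3's bit in the write mask, so only that
	 * port's membership is touched by this write.
	 */
	mlxsw_reg_smid_pack(smid_pl, 42, 3, true);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl);
	kfree(smid_pl);
	return err;
}

This is the same pattern that mlxsw_sp_port_smid_set() later in this series builds on, extended there to widen the write mask to all ports when a group is first created.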
@@ -287,6 +336,7 @@ MLXSW_ITEM32_INDEXED(reg, sfd, rec_swid, MLXSW_REG_SFD_BASE_LEN, 24, 8, enum mlxsw_reg_sfd_rec_type { MLXSW_REG_SFD_REC_TYPE_UNICAST = 0x0, MLXSW_REG_SFD_REC_TYPE_UNICAST_LAG = 0x1, + MLXSW_REG_SFD_REC_TYPE_MULTICAST = 0x2, }; /* reg_sfd_rec_type @@ -379,7 +429,6 @@ MLXSW_ITEM32_INDEXED(reg, sfd, uc_system_port, MLXSW_REG_SFD_BASE_LEN, 0, 16, static inline void mlxsw_reg_sfd_rec_pack(char *payload, int rec_index, enum mlxsw_reg_sfd_rec_type rec_type, - enum mlxsw_reg_sfd_rec_policy policy, const char *mac, enum mlxsw_reg_sfd_rec_action action) { @@ -389,7 +438,6 @@ static inline void mlxsw_reg_sfd_rec_pack(char *payload, int rec_index, mlxsw_reg_sfd_num_rec_set(payload, rec_index + 1); mlxsw_reg_sfd_rec_swid_set(payload, rec_index, 0); mlxsw_reg_sfd_rec_type_set(payload, rec_index, rec_type); - mlxsw_reg_sfd_rec_policy_set(payload, rec_index, policy); mlxsw_reg_sfd_rec_mac_memcpy_to(payload, rec_index, mac); mlxsw_reg_sfd_rec_action_set(payload, rec_index, action); } @@ -401,8 +449,8 @@ static inline void mlxsw_reg_sfd_uc_pack(char *payload, int rec_index, u8 local_port) { mlxsw_reg_sfd_rec_pack(payload, rec_index, - MLXSW_REG_SFD_REC_TYPE_UNICAST, - policy, mac, action); + MLXSW_REG_SFD_REC_TYPE_UNICAST, mac, action); + mlxsw_reg_sfd_rec_policy_set(payload, rec_index, policy); mlxsw_reg_sfd_uc_sub_port_set(payload, rec_index, 0); mlxsw_reg_sfd_uc_fid_vid_set(payload, rec_index, fid_vid); mlxsw_reg_sfd_uc_system_port_set(payload, rec_index, local_port); @@ -461,7 +509,8 @@ mlxsw_reg_sfd_uc_lag_pack(char *payload, int rec_index, { mlxsw_reg_sfd_rec_pack(payload, rec_index, MLXSW_REG_SFD_REC_TYPE_UNICAST_LAG, - policy, mac, action); + mac, action); + mlxsw_reg_sfd_rec_policy_set(payload, rec_index, policy); mlxsw_reg_sfd_uc_lag_sub_port_set(payload, rec_index, 0); mlxsw_reg_sfd_uc_lag_fid_vid_set(payload, rec_index, fid_vid); mlxsw_reg_sfd_uc_lag_lag_vid_set(payload, rec_index, lag_vid); @@ -477,6 +526,45 @@ static inline void mlxsw_reg_sfd_uc_lag_unpack(char *payload, int rec_index, *p_lag_id = mlxsw_reg_sfd_uc_lag_lag_id_get(payload, rec_index); } +/* reg_sfd_mc_pgi + * + * Multicast port group index - index into the port group table. + * Value 0x1FFF indicates the pgi should point to the MID entry. + * For Spectrum this value must be set to 0x1FFF + * Access: RW + */ +MLXSW_ITEM32_INDEXED(reg, sfd, mc_pgi, MLXSW_REG_SFD_BASE_LEN, 16, 13, + MLXSW_REG_SFD_REC_LEN, 0x08, false); + +/* reg_sfd_mc_fid_vid + * + * Filtering ID or VLAN ID + * Access: Index + */ +MLXSW_ITEM32_INDEXED(reg, sfd, mc_fid_vid, MLXSW_REG_SFD_BASE_LEN, 0, 16, + MLXSW_REG_SFD_REC_LEN, 0x08, false); + +/* reg_sfd_mc_mid + * + * Multicast identifier - global identifier that represents the multicast + * group across all devices. 
+ * Access: RW + */ +MLXSW_ITEM32_INDEXED(reg, sfd, mc_mid, MLXSW_REG_SFD_BASE_LEN, 0, 16, + MLXSW_REG_SFD_REC_LEN, 0x0C, false); + +static inline void +mlxsw_reg_sfd_mc_pack(char *payload, int rec_index, + const char *mac, u16 fid_vid, + enum mlxsw_reg_sfd_rec_action action, u16 mid) +{ + mlxsw_reg_sfd_rec_pack(payload, rec_index, + MLXSW_REG_SFD_REC_TYPE_MULTICAST, mac, action); + mlxsw_reg_sfd_mc_pgi_set(payload, rec_index, 0x1FFF); + mlxsw_reg_sfd_mc_fid_vid_set(payload, rec_index, fid_vid); + mlxsw_reg_sfd_mc_mid_set(payload, rec_index, mid); +} + /* SFN - Switch FDB Notification Register * ------------------------------------------- * The switch provides notifications on newly learned FDB entries and @@ -956,6 +1044,92 @@ static inline void mlxsw_reg_sftr_pack(char *payload, mlxsw_reg_sftr_port_mask_set(payload, port, 1); } +/* SFDF - Switch Filtering DB Flush + * -------------------------------- + * The switch filtering DB flush register is used to flush the FDB. + * Note that FDB notifications are flushed as well. + */ +#define MLXSW_REG_SFDF_ID 0x2013 +#define MLXSW_REG_SFDF_LEN 0x14 + +static const struct mlxsw_reg_info mlxsw_reg_sfdf = { + .id = MLXSW_REG_SFDF_ID, + .len = MLXSW_REG_SFDF_LEN, +}; + +/* reg_sfdf_swid + * Switch partition ID. + * Access: Index + */ +MLXSW_ITEM32(reg, sfdf, swid, 0x00, 24, 8); + +enum mlxsw_reg_sfdf_flush_type { + MLXSW_REG_SFDF_FLUSH_PER_SWID, + MLXSW_REG_SFDF_FLUSH_PER_FID, + MLXSW_REG_SFDF_FLUSH_PER_PORT, + MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID, + MLXSW_REG_SFDF_FLUSH_PER_LAG, + MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID, +}; + +/* reg_sfdf_flush_type + * Flush type. + * 0 - All SWID dynamic entries are flushed. + * 1 - All FID dynamic entries are flushed. + * 2 - All dynamic entries pointing to port are flushed. + * 3 - All FID dynamic entries pointing to port are flushed. + * 4 - All dynamic entries pointing to LAG are flushed. + * 5 - All FID dynamic entries pointing to LAG are flushed. + * Access: RW + */ +MLXSW_ITEM32(reg, sfdf, flush_type, 0x04, 28, 4); + +/* reg_sfdf_flush_static + * Static. + * 0 - Flush only dynamic entries. + * 1 - Flush both dynamic and static entries. + * Access: RW + */ +MLXSW_ITEM32(reg, sfdf, flush_static, 0x04, 24, 1); + +static inline void mlxsw_reg_sfdf_pack(char *payload, + enum mlxsw_reg_sfdf_flush_type type) +{ + MLXSW_REG_ZERO(sfdf, payload); + mlxsw_reg_sfdf_flush_type_set(payload, type); + mlxsw_reg_sfdf_flush_static_set(payload, true); +} + +/* reg_sfdf_fid + * FID to flush. + * Access: RW + */ +MLXSW_ITEM32(reg, sfdf, fid, 0x0C, 0, 16); + +/* reg_sfdf_system_port + * Port to flush. + * Access: RW + */ +MLXSW_ITEM32(reg, sfdf, system_port, 0x0C, 0, 16); + +/* reg_sfdf_port_fid_system_port + * Port to flush, pointed to by FID. + * Access: RW + */ +MLXSW_ITEM32(reg, sfdf, port_fid_system_port, 0x08, 0, 16); + +/* reg_sfdf_lag_id + * LAG ID to flush. + * Access: RW + */ +MLXSW_ITEM32(reg, sfdf, lag_id, 0x0C, 0, 10); + +/* reg_sfdf_lag_fid_lag_id + * LAG ID to flush, pointed to by FID. + * Access: RW + */ +MLXSW_ITEM32(reg, sfdf, lag_fid_lag_id, 0x08, 0, 10); + /* SLDR - Switch LAG Descriptor Register * ----------------------------------------- * The switch LAG descriptor register is populated by LAG descriptors. @@ -1613,20 +1787,20 @@ MLXSW_ITEM32(reg, pmlp, width, 0x00, 0, 8); * Module number. * Access: RW */ -MLXSW_ITEM32_INDEXED(reg, pmlp, module, 0x04, 0, 8, 0x04, 0, false); +MLXSW_ITEM32_INDEXED(reg, pmlp, module, 0x04, 0, 8, 0x04, 0x00, false); /* reg_pmlp_tx_lane * Tx Lane. 
When rxtx field is cleared, this field is used for Rx as well. * Access: RW */ -MLXSW_ITEM32_INDEXED(reg, pmlp, tx_lane, 0x04, 16, 2, 0x04, 16, false); +MLXSW_ITEM32_INDEXED(reg, pmlp, tx_lane, 0x04, 16, 2, 0x04, 0x00, false); /* reg_pmlp_rx_lane * Rx Lane. When rxtx field is cleared, this field is ignored and Rx lane is * equal to Tx lane. * Access: RW */ -MLXSW_ITEM32_INDEXED(reg, pmlp, rx_lane, 0x04, 24, 2, 0x04, 24, false); +MLXSW_ITEM32_INDEXED(reg, pmlp, rx_lane, 0x04, 24, 2, 0x04, 0x00, false); static inline void mlxsw_reg_pmlp_pack(char *payload, u8 local_port) { @@ -3013,6 +3187,8 @@ static inline const char *mlxsw_reg_id_str(u16 reg_id) return "SGCR"; case MLXSW_REG_SPAD_ID: return "SPAD"; + case MLXSW_REG_SMID_ID: + return "SMID"; case MLXSW_REG_SSPR_ID: return "SSPR"; case MLXSW_REG_SFDAT_ID: @@ -3031,6 +3207,8 @@ static inline const char *mlxsw_reg_id_str(u16 reg_id) return "SFGC"; case MLXSW_REG_SFTR_ID: return "SFTR"; + case MLXSW_REG_SFDF_ID: + return "SFDF"; case MLXSW_REG_SLDR_ID: return "SLDR"; case MLXSW_REG_SLCR_ID: diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 74ff0110e899..217856bdd400 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -1370,6 +1370,11 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port) err = -ENOMEM; goto err_port_active_vlans_alloc; } + mlxsw_sp_port->untagged_vlans = kzalloc(bytes, GFP_KERNEL); + if (!mlxsw_sp_port->untagged_vlans) { + err = -ENOMEM; + goto err_port_untagged_vlans_alloc; + } INIT_LIST_HEAD(&mlxsw_sp_port->vports_list); mlxsw_sp_port->pcpu_stats = @@ -1472,6 +1477,8 @@ err_port_module_check: err_dev_addr_init: free_percpu(mlxsw_sp_port->pcpu_stats); err_alloc_stats: + kfree(mlxsw_sp_port->untagged_vlans); +err_port_untagged_vlans_alloc: kfree(mlxsw_sp_port->active_vlans); err_port_active_vlans_alloc: free_netdev(dev); @@ -1505,6 +1512,7 @@ static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port) mlxsw_sp_port_vports_fini(mlxsw_sp_port); mlxsw_sp_port_switchdev_fini(mlxsw_sp_port); free_percpu(mlxsw_sp_port->pcpu_stats); + kfree(mlxsw_sp_port->untagged_vlans); kfree(mlxsw_sp_port->active_vlans); free_netdev(mlxsw_sp_port->dev); } @@ -1850,6 +1858,7 @@ static int mlxsw_sp_init(void *priv, struct mlxsw_core *mlxsw_core, mlxsw_sp->bus_info = mlxsw_bus_info; INIT_LIST_HEAD(&mlxsw_sp->port_vfids.list); INIT_LIST_HEAD(&mlxsw_sp->br_vfids.list); + INIT_LIST_HEAD(&mlxsw_sp->br_mids.list); err = mlxsw_sp_base_mac_get(mlxsw_sp); if (err) { @@ -1931,7 +1940,7 @@ static struct mlxsw_config_profile mlxsw_sp_config_profile = { .used_max_port_per_lag = 1, .max_port_per_lag = MLXSW_SP_PORT_PER_LAG_MAX, .used_max_mid = 1, - .max_mid = 7000, + .max_mid = MLXSW_SP_MID_MAX, .used_max_pgt = 1, .max_pgt = 0, .used_max_system_port = 1, @@ -1970,6 +1979,115 @@ static struct mlxsw_driver mlxsw_sp_driver = { .profile = &mlxsw_sp_config_profile, }; +static int +mlxsw_sp_port_fdb_flush_by_port(const struct mlxsw_sp_port *mlxsw_sp_port) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + char sfdf_pl[MLXSW_REG_SFDF_LEN]; + + mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_PORT); + mlxsw_reg_sfdf_system_port_set(sfdf_pl, mlxsw_sp_port->local_port); + + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl); +} + +static int +mlxsw_sp_port_fdb_flush_by_port_fid(const struct mlxsw_sp_port *mlxsw_sp_port, + u16 fid) +{ + struct mlxsw_sp *mlxsw_sp = 
mlxsw_sp_port->mlxsw_sp; + char sfdf_pl[MLXSW_REG_SFDF_LEN]; + + mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID); + mlxsw_reg_sfdf_fid_set(sfdf_pl, fid); + mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl, + mlxsw_sp_port->local_port); + + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl); +} + +static int +mlxsw_sp_port_fdb_flush_by_lag_id(const struct mlxsw_sp_port *mlxsw_sp_port) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + char sfdf_pl[MLXSW_REG_SFDF_LEN]; + + mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_LAG); + mlxsw_reg_sfdf_lag_id_set(sfdf_pl, mlxsw_sp_port->lag_id); + + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl); +} + +static int +mlxsw_sp_port_fdb_flush_by_lag_id_fid(const struct mlxsw_sp_port *mlxsw_sp_port, + u16 fid) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + char sfdf_pl[MLXSW_REG_SFDF_LEN]; + + mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID); + mlxsw_reg_sfdf_fid_set(sfdf_pl, fid); + mlxsw_reg_sfdf_lag_fid_lag_id_set(sfdf_pl, mlxsw_sp_port->lag_id); + + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl); +} + +static int +__mlxsw_sp_port_fdb_flush(const struct mlxsw_sp_port *mlxsw_sp_port) +{ + int err, last_err = 0; + u16 vid; + + for (vid = 1; vid < VLAN_N_VID - 1; vid++) { + err = mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_port, vid); + if (err) + last_err = err; + } + + return last_err; +} + +static int +__mlxsw_sp_port_fdb_flush_lagged(const struct mlxsw_sp_port *mlxsw_sp_port) +{ + int err, last_err = 0; + u16 vid; + + for (vid = 1; vid < VLAN_N_VID - 1; vid++) { + err = mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_port, vid); + if (err) + last_err = err; + } + + return last_err; +} + +static int mlxsw_sp_port_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_port) +{ + if (!list_empty(&mlxsw_sp_port->vports_list)) + if (mlxsw_sp_port->lagged) + return __mlxsw_sp_port_fdb_flush_lagged(mlxsw_sp_port); + else + return __mlxsw_sp_port_fdb_flush(mlxsw_sp_port); + else + if (mlxsw_sp_port->lagged) + return mlxsw_sp_port_fdb_flush_by_lag_id(mlxsw_sp_port); + else + return mlxsw_sp_port_fdb_flush_by_port(mlxsw_sp_port); +} + +static int mlxsw_sp_vport_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_vport) +{ + u16 vfid = mlxsw_sp_vport_vfid_get(mlxsw_sp_vport); + u16 fid = mlxsw_sp_vfid_to_fid(vfid); + + if (mlxsw_sp_vport->lagged) + return mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_vport, + fid); + else + return mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_vport, fid); +} + static bool mlxsw_sp_port_dev_check(const struct net_device *dev) { return dev->netdev_ops == &mlxsw_sp_port_netdev_ops; @@ -1997,10 +2115,14 @@ static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port) return 0; } -static int mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port) +static int mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port, + bool flush_fdb) { struct net_device *dev = mlxsw_sp_port->dev; + if (flush_fdb && mlxsw_sp_port_fdb_flush(mlxsw_sp_port)) + netdev_err(mlxsw_sp_port->dev, "Failed to flush FDB\n"); + mlxsw_sp_port->learning = 0; mlxsw_sp_port->learning_sync = 0; mlxsw_sp_port->uc_flood = 0; @@ -2191,10 +2313,15 @@ err_col_port_enable: return err; } +static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport, + struct net_device *br_dev, + bool flush_fdb); + static int mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port, struct net_device *lag_dev) { struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 
+ struct mlxsw_sp_port *mlxsw_sp_vport; struct mlxsw_sp_upper *lag; u16 lag_id = mlxsw_sp_port->lag_id; int err; @@ -2211,7 +2338,32 @@ static int mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port, if (err) return err; + /* In case we leave a LAG device that has bridges built on top, + * then their teardown sequence is never issued and we need to + * invoke the necessary cleanup routines ourselves. + */ + list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list, + vport.list) { + struct net_device *br_dev; + + if (!mlxsw_sp_vport->bridged) + continue; + + br_dev = mlxsw_sp_vport_br_get(mlxsw_sp_vport); + mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport, br_dev, false); + } + + if (mlxsw_sp_port->bridged) { + mlxsw_sp_port_active_vlans_del(mlxsw_sp_port); + mlxsw_sp_port_bridge_leave(mlxsw_sp_port, false); + + if (lag->ref_count == 1) + mlxsw_sp_master_bridge_dec(mlxsw_sp, NULL); + } + if (lag->ref_count == 1) { + if (mlxsw_sp_port_fdb_flush_by_lag_id(mlxsw_sp_port)) + netdev_err(mlxsw_sp_port->dev, "Failed to flush FDB\n"); err = mlxsw_sp_lag_destroy(mlxsw_sp, lag_id); if (err) return err; @@ -2263,9 +2415,6 @@ static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port, return mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port, info->tx_enabled); } -static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport, - struct net_device *br_dev); - static int mlxsw_sp_port_vlan_link(struct mlxsw_sp_port *mlxsw_sp_port, struct net_device *vlan_dev) { @@ -2303,7 +2452,7 @@ static int mlxsw_sp_port_vlan_unlink(struct mlxsw_sp_port *mlxsw_sp_port, struct net_device *br_dev; br_dev = mlxsw_sp_vport_br_get(mlxsw_sp_vport); - mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport, br_dev); + mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport, br_dev, true); } mlxsw_sp_vport->dev = mlxsw_sp_port->dev; @@ -2365,7 +2514,8 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev, } mlxsw_sp_master_bridge_inc(mlxsw_sp, upper_dev); } else { - err = mlxsw_sp_port_bridge_leave(mlxsw_sp_port); + err = mlxsw_sp_port_bridge_leave(mlxsw_sp_port, + true); mlxsw_sp_master_bridge_dec(mlxsw_sp, upper_dev); if (err) { netdev_err(dev, "Failed to leave bridge\n"); @@ -2532,7 +2682,8 @@ static void mlxsw_sp_br_vfid_destroy(struct mlxsw_sp *mlxsw_sp, } static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport, - struct net_device *br_dev) + struct net_device *br_dev, + bool flush_fdb) { struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp; u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport); @@ -2595,6 +2746,9 @@ static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport, goto err_vport_flood_set; } + if (flush_fdb && mlxsw_sp_vport_fdb_flush(mlxsw_sp_vport)) + netdev_err(dev, "Failed to flush FDB\n"); + /* Switch between the vFIDs and destroy the old one if needed. 
*/ new_vfid->nr_vports++; mlxsw_sp_vport->vport.vfid = new_vfid; @@ -2768,7 +2922,7 @@ static int mlxsw_sp_netdevice_vport_event(struct net_device *dev, if (!mlxsw_sp_vport) return NOTIFY_DONE; err = mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport, - upper_dev); + upper_dev, true); if (err) { netdev_err(dev, "Failed to leave bridge\n"); return NOTIFY_BAD; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 463ed6dcc709..7f42eb1c320e 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -44,6 +44,7 @@ #include <linux/list.h> #include <net/switchdev.h> +#include "port.h" #include "core.h" #define MLXSW_SP_VFID_BASE VLAN_N_VID @@ -54,6 +55,8 @@ #define MLXSW_SP_LAG_MAX 64 #define MLXSW_SP_PORT_PER_LAG_MAX 16 +#define MLXSW_SP_MID_MAX 7000 + struct mlxsw_sp_port; struct mlxsw_sp_upper { @@ -69,6 +72,14 @@ struct mlxsw_sp_vfid { u16 vid; }; +struct mlxsw_sp_mid { + struct list_head list; + unsigned char addr[ETH_ALEN]; + u16 vid; + u16 mid; + unsigned int ref_count; +}; + static inline u16 mlxsw_sp_vfid_to_fid(u16 vfid) { return MLXSW_SP_VFID_BASE + vfid; @@ -93,6 +104,10 @@ struct mlxsw_sp { struct list_head list; unsigned long mapped[BITS_TO_LONGS(MLXSW_SP_VFID_BR_MAX)]; } br_vfids; + struct { + struct list_head list; + unsigned long mapped[BITS_TO_LONGS(MLXSW_SP_MID_MAX)]; + } br_mids; unsigned long active_fids[BITS_TO_LONGS(VLAN_N_VID)]; struct mlxsw_sp_port **ports; struct mlxsw_core *core; @@ -144,6 +159,7 @@ struct mlxsw_sp_port { } vport; /* 802.1Q bridge VLANs */ unsigned long *active_vlans; + unsigned long *untagged_vlans; /* VLAN interfaces */ struct list_head vports_list; }; @@ -237,5 +253,6 @@ int mlxsw_sp_port_kill_vid(struct net_device *dev, __be16 __always_unused proto, u16 vid); int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 vfid, bool set, bool only_uc); +void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port); #endif diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index 62159547ebf9..e492ca2cdecd 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c @@ -45,12 +45,30 @@ #include <linux/if_bridge.h> #include <linux/workqueue.h> #include <linux/jiffies.h> +#include <linux/rtnetlink.h> #include <net/switchdev.h> #include "spectrum.h" #include "core.h" #include "reg.h" +static u16 mlxsw_sp_port_vid_to_fid_get(struct mlxsw_sp_port *mlxsw_sp_port, + u16 vid) +{ + u16 fid = vid; + + if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) { + u16 vfid = mlxsw_sp_vport_vfid_get(mlxsw_sp_port); + + fid = mlxsw_sp_vfid_to_fid(vfid); + } + + if (!fid) + fid = mlxsw_sp_port->pvid; + + return fid; +} + static struct mlxsw_sp_port * mlxsw_sp_port_orig_get(struct net_device *dev, struct mlxsw_sp_port *mlxsw_sp_port) @@ -107,14 +125,14 @@ static int mlxsw_sp_port_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port, int err; switch (state) { - case BR_STATE_DISABLED: /* fall-through */ case BR_STATE_FORWARDING: spms_state = MLXSW_REG_SPMS_STATE_FORWARDING; break; - case BR_STATE_LISTENING: /* fall-through */ case BR_STATE_LEARNING: spms_state = MLXSW_REG_SPMS_STATE_LEARNING; break; + case BR_STATE_LISTENING: /* fall-through */ + case BR_STATE_DISABLED: /* fall-through */ case BR_STATE_BLOCKING: spms_state = MLXSW_REG_SPMS_STATE_DISCARDING; break; @@ -299,6 +317,22 @@ static int 
mlxsw_sp_port_attr_br_ageing_set(struct mlxsw_sp_port *mlxsw_sp_port, return mlxsw_sp_ageing_set(mlxsw_sp, ageing_time); } +static int mlxsw_sp_port_attr_br_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, + struct switchdev_trans *trans, + struct net_device *orig_dev, + bool vlan_enabled) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + + /* SWITCHDEV_TRANS_PREPARE phase */ + if ((!vlan_enabled) && (mlxsw_sp->master_bridge.dev == orig_dev)) { + netdev_err(mlxsw_sp_port->dev, "Bridge must be vlan-aware\n"); + return -EINVAL; + } + + return 0; +} + static int mlxsw_sp_port_attr_set(struct net_device *dev, const struct switchdev_attr *attr, struct switchdev_trans *trans) @@ -323,6 +357,11 @@ static int mlxsw_sp_port_attr_set(struct net_device *dev, err = mlxsw_sp_port_attr_br_ageing_set(mlxsw_sp_port, trans, attr->u.ageing_time); break; + case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING: + err = mlxsw_sp_port_attr_br_vlan_set(mlxsw_sp_port, trans, + attr->orig_dev, + attr->u.vlan_filtering); + break; default: err = -EOPNOTSUPP; break; @@ -505,8 +544,13 @@ static int __mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port, } /* Changing activity bits only if HW operation succeeded */ - for (vid = vid_begin; vid <= vid_end; vid++) + for (vid = vid_begin; vid <= vid_end; vid++) { set_bit(vid, mlxsw_sp_port->active_vlans); + if (flag_untagged) + set_bit(vid, mlxsw_sp_port->untagged_vlans); + else + clear_bit(vid, mlxsw_sp_port->untagged_vlans); + } /* STP state change must be done after we set active VLANs */ err = mlxsw_sp_port_stp_state_set(mlxsw_sp_port, @@ -545,15 +589,15 @@ static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port, const struct switchdev_obj_port_vlan *vlan, struct switchdev_trans *trans) { - bool untagged_flag = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; - bool pvid_flag = vlan->flags & BRIDGE_VLAN_INFO_PVID; + bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; + bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID; if (switchdev_trans_ph_prepare(trans)) return 0; return __mlxsw_sp_port_vlans_add(mlxsw_sp_port, vlan->vid_begin, vlan->vid_end, - untagged_flag, pvid_flag); + flag_untagged, flag_pvid); } static enum mlxsw_reg_sfd_rec_policy mlxsw_sp_sfd_rec_policy(bool dynamic) @@ -568,11 +612,10 @@ static enum mlxsw_reg_sfd_op mlxsw_sp_sfd_op(bool adding) MLXSW_REG_SFD_OP_WRITE_REMOVE; } -static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp_port *mlxsw_sp_port, +static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port, const char *mac, u16 fid, bool adding, bool dynamic) { - struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; char *sfd_pl; int err; @@ -583,7 +626,7 @@ static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp_port *mlxsw_sp_port, mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0); mlxsw_reg_sfd_uc_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic), mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP, - mlxsw_sp_port->local_port); + local_port); err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl); kfree(sfd_pl); @@ -616,24 +659,19 @@ mlxsw_sp_port_fdb_static_add(struct mlxsw_sp_port *mlxsw_sp_port, const struct switchdev_obj_port_fdb *fdb, struct switchdev_trans *trans) { - u16 fid = fdb->vid; + u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, fdb->vid); u16 lag_vid = 0; if (switchdev_trans_ph_prepare(trans)) return 0; if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) { - u16 vfid = mlxsw_sp_vport_vfid_get(mlxsw_sp_port); - - fid = mlxsw_sp_vfid_to_fid(vfid); lag_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port); } - if (!fid)
- fid = mlxsw_sp_port->pvid; - if (!mlxsw_sp_port->lagged) - return mlxsw_sp_port_fdb_uc_op(mlxsw_sp_port, + return mlxsw_sp_port_fdb_uc_op(mlxsw_sp_port->mlxsw_sp, + mlxsw_sp_port->local_port, fdb->addr, fid, true, false); else return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp_port->mlxsw_sp, @@ -642,6 +680,143 @@ mlxsw_sp_port_fdb_static_add(struct mlxsw_sp_port *mlxsw_sp_port, true, false); } +static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr, + u16 fid, u16 mid, bool adding) +{ + char *sfd_pl; + int err; + + sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL); + if (!sfd_pl) + return -ENOMEM; + + mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0); + mlxsw_reg_sfd_mc_pack(sfd_pl, 0, addr, fid, + MLXSW_REG_SFD_REC_ACTION_NOP, mid); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl); + kfree(sfd_pl); + return err; +} + +static int mlxsw_sp_port_smid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mid, + bool add, bool clear_all_ports) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + char *smid_pl; + int err, i; + + smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL); + if (!smid_pl) + return -ENOMEM; + + mlxsw_reg_smid_pack(smid_pl, mid, mlxsw_sp_port->local_port, add); + if (clear_all_ports) { + for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) + if (mlxsw_sp->ports[i]) + mlxsw_reg_smid_port_mask_set(smid_pl, i, 1); + } + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl); + kfree(smid_pl); + return err; +} + +static struct mlxsw_sp_mid *__mlxsw_sp_mc_get(struct mlxsw_sp *mlxsw_sp, + const unsigned char *addr, + u16 vid) +{ + struct mlxsw_sp_mid *mid; + + list_for_each_entry(mid, &mlxsw_sp->br_mids.list, list) { + if (ether_addr_equal(mid->addr, addr) && mid->vid == vid) + return mid; + } + return NULL; +} + +static struct mlxsw_sp_mid *__mlxsw_sp_mc_alloc(struct mlxsw_sp *mlxsw_sp, + const unsigned char *addr, + u16 vid) +{ + struct mlxsw_sp_mid *mid; + u16 mid_idx; + + mid_idx = find_first_zero_bit(mlxsw_sp->br_mids.mapped, + MLXSW_SP_MID_MAX); + if (mid_idx == MLXSW_SP_MID_MAX) + return NULL; + + mid = kzalloc(sizeof(*mid), GFP_KERNEL); + if (!mid) + return NULL; + + set_bit(mid_idx, mlxsw_sp->br_mids.mapped); + ether_addr_copy(mid->addr, addr); + mid->vid = vid; + mid->mid = mid_idx; + mid->ref_count = 0; + list_add_tail(&mid->list, &mlxsw_sp->br_mids.list); + + return mid; +} + +static int __mlxsw_sp_mc_dec_ref(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_mid *mid) +{ + if (--mid->ref_count == 0) { + list_del(&mid->list); + clear_bit(mid->mid, mlxsw_sp->br_mids.mapped); + kfree(mid); + return 1; + } + return 0; +} + +static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port, + const struct switchdev_obj_port_mdb *mdb, + struct switchdev_trans *trans) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + struct net_device *dev = mlxsw_sp_port->dev; + struct mlxsw_sp_mid *mid; + u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, mdb->vid); + int err = 0; + + if (switchdev_trans_ph_prepare(trans)) + return 0; + + mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, mdb->vid); + if (!mid) { + mid = __mlxsw_sp_mc_alloc(mlxsw_sp, mdb->addr, mdb->vid); + if (!mid) { + netdev_err(dev, "Unable to allocate MC group\n"); + return -ENOMEM; + } + } + mid->ref_count++; + + err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, true, + mid->ref_count == 1); + if (err) { + netdev_err(dev, "Unable to set SMID\n"); + goto err_out; + } + + if (mid->ref_count == 1) { + err = mlxsw_sp_port_mdb_op(mlxsw_sp, mdb->addr, fid, mid->mid, + true); + if 
(err) { + netdev_err(dev, "Unable to set MC SFD\n"); + goto err_out; + } + } + + return 0; + +err_out: + __mlxsw_sp_mc_dec_ref(mlxsw_sp, mid); + return err; +} + static int mlxsw_sp_port_obj_add(struct net_device *dev, const struct switchdev_obj *obj, struct switchdev_trans *trans) @@ -667,6 +842,11 @@ static int mlxsw_sp_port_obj_add(struct net_device *dev, SWITCHDEV_OBJ_PORT_FDB(obj), trans); break; + case SWITCHDEV_OBJ_ID_PORT_MDB: + err = mlxsw_sp_port_mdb_add(mlxsw_sp_port, + SWITCHDEV_OBJ_PORT_MDB(obj), + trans); + break; default: err = -EOPNOTSUPP; break; @@ -757,22 +937,28 @@ static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port, vlan->vid_begin, vlan->vid_end, false); } +void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port) +{ + u16 vid; + + for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) + __mlxsw_sp_port_vlans_del(mlxsw_sp_port, vid, vid, false); +} + static int mlxsw_sp_port_fdb_static_del(struct mlxsw_sp_port *mlxsw_sp_port, const struct switchdev_obj_port_fdb *fdb) { - u16 fid = fdb->vid; + u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, fdb->vid); u16 lag_vid = 0; if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) { - u16 vfid = mlxsw_sp_vport_vfid_get(mlxsw_sp_port); - - fid = mlxsw_sp_vfid_to_fid(vfid); lag_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port); } if (!mlxsw_sp_port->lagged) - return mlxsw_sp_port_fdb_uc_op(mlxsw_sp_port, + return mlxsw_sp_port_fdb_uc_op(mlxsw_sp_port->mlxsw_sp, + mlxsw_sp_port->local_port, fdb->addr, fid, false, false); else @@ -782,6 +968,37 @@ mlxsw_sp_port_fdb_static_del(struct mlxsw_sp_port *mlxsw_sp_port, false, false); } +static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port, + const struct switchdev_obj_port_mdb *mdb) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + struct net_device *dev = mlxsw_sp_port->dev; + struct mlxsw_sp_mid *mid; + u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, mdb->vid); + u16 mid_idx; + int err = 0; + + mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, mdb->vid); + if (!mid) { + netdev_err(dev, "Unable to remove port from MC DB\n"); + return -EINVAL; + } + + err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, false, false); + if (err) + netdev_err(dev, "Unable to remove port from SMID\n"); + + mid_idx = mid->mid; + if (__mlxsw_sp_mc_dec_ref(mlxsw_sp, mid)) { + err = mlxsw_sp_port_mdb_op(mlxsw_sp, mdb->addr, fid, mid_idx, + false); + if (err) + netdev_err(dev, "Unable to remove MC SFD\n"); + } + + return err; +} + static int mlxsw_sp_port_obj_del(struct net_device *dev, const struct switchdev_obj *obj) { @@ -804,6 +1021,10 @@ static int mlxsw_sp_port_obj_del(struct net_device *dev, err = mlxsw_sp_port_fdb_static_del(mlxsw_sp_port, SWITCHDEV_OBJ_PORT_FDB(obj)); break; + case SWITCHDEV_OBJ_ID_PORT_MDB: + err = mlxsw_sp_port_mdb_del(mlxsw_sp_port, + SWITCHDEV_OBJ_PORT_MDB(obj)); + break; default: err = -EOPNOTSUPP; break; @@ -828,10 +1049,12 @@ static struct mlxsw_sp_port *mlxsw_sp_lag_rep_port(struct mlxsw_sp *mlxsw_sp, static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port, struct switchdev_obj_port_fdb *fdb, - switchdev_obj_dump_cb_t *cb) + switchdev_obj_dump_cb_t *cb, + struct net_device *orig_dev) { struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; - u16 vport_vid = 0, vport_fid = 0; + struct mlxsw_sp_port *tmp; + u16 vport_fid = 0; char *sfd_pl; char mac[ETH_ALEN]; u16 fid; @@ -851,7 +1074,6 @@ static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port, tmp = mlxsw_sp_vport_vfid_get(mlxsw_sp_port); vport_fid 
= mlxsw_sp_vfid_to_fid(tmp); - vport_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port); } mlxsw_reg_sfd_pack(sfd_pl, MLXSW_REG_SFD_OP_QUERY_DUMP, 0); @@ -875,12 +1097,13 @@ static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port, mlxsw_reg_sfd_uc_unpack(sfd_pl, i, mac, &fid, &local_port); if (local_port == mlxsw_sp_port->local_port) { - if (vport_fid && vport_fid != fid) - continue; - else if (vport_fid) - fdb->vid = vport_vid; - else + if (vport_fid && vport_fid == fid) + fdb->vid = 0; + else if (!vport_fid && + !mlxsw_sp_fid_is_vfid(fid)) fdb->vid = fid; + else + continue; ether_addr_copy(fdb->addr, mac); fdb->ndm_state = NUD_REACHABLE; err = cb(&fdb->obj); @@ -891,14 +1114,22 @@ static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port, case MLXSW_REG_SFD_REC_TYPE_UNICAST_LAG: mlxsw_reg_sfd_uc_lag_unpack(sfd_pl, i, mac, &fid, &lag_id); - if (mlxsw_sp_port == - mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id)) { - if (vport_fid && vport_fid != fid) + tmp = mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id); + if (tmp && tmp->local_port == + mlxsw_sp_port->local_port) { + /* LAG records can only point to LAG + * devices or VLAN devices on top. + */ + if (!netif_is_lag_master(orig_dev) && + !is_vlan_dev(orig_dev)) continue; - else if (vport_fid) - fdb->vid = vport_vid; - else + if (vport_fid && vport_fid == fid) + fdb->vid = 0; + else if (!vport_fid && + !mlxsw_sp_fid_is_vfid(fid)) fdb->vid = fid; + else + continue; ether_addr_copy(fdb->addr, mac); fdb->ndm_state = NUD_REACHABLE; err = cb(&fdb->obj); @@ -933,6 +1164,8 @@ static int mlxsw_sp_port_vlan_dump(struct mlxsw_sp_port *mlxsw_sp_port, vlan->flags = 0; if (vid == mlxsw_sp_port->pvid) vlan->flags |= BRIDGE_VLAN_INFO_PVID; + if (test_bit(vid, mlxsw_sp_port->untagged_vlans)) + vlan->flags |= BRIDGE_VLAN_INFO_UNTAGGED; vlan->vid_begin = vid; vlan->vid_end = vid; err = cb(&vlan->obj); @@ -960,7 +1193,8 @@ static int mlxsw_sp_port_obj_dump(struct net_device *dev, break; case SWITCHDEV_OBJ_ID_PORT_FDB: err = mlxsw_sp_port_fdb_dump(mlxsw_sp_port, - SWITCHDEV_OBJ_PORT_FDB(obj), cb); + SWITCHDEV_OBJ_PORT_FDB(obj), cb, + obj->orig_dev); break; default: err = -EOPNOTSUPP; @@ -978,14 +1212,14 @@ static const struct switchdev_ops mlxsw_sp_port_switchdev_ops = { .switchdev_port_obj_dump = mlxsw_sp_port_obj_dump, }; -static void mlxsw_sp_fdb_call_notifiers(bool learning, bool learning_sync, - bool adding, char *mac, u16 vid, +static void mlxsw_sp_fdb_call_notifiers(bool learning_sync, bool adding, + char *mac, u16 vid, struct net_device *dev) { struct switchdev_notifier_fdb_info info; unsigned long notifier_type; - if (learning && learning_sync) { + if (learning_sync) { info.addr = mac; info.vid = vid; notifier_type = adding ? 
SWITCHDEV_FDB_ADD : SWITCHDEV_FDB_DEL; @@ -1001,13 +1235,14 @@ static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp, char mac[ETH_ALEN]; u8 local_port; u16 vid, fid; + bool do_notification = true; int err; mlxsw_reg_sfn_mac_unpack(sfn_pl, rec_index, mac, &fid, &local_port); mlxsw_sp_port = mlxsw_sp->ports[local_port]; if (!mlxsw_sp_port) { dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect local port in FDB notification\n"); - return; + goto just_remove; } if (mlxsw_sp_fid_is_vfid(fid)) { @@ -1018,27 +1253,36 @@ static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp, vfid); if (!mlxsw_sp_vport) { netdev_err(mlxsw_sp_port->dev, "Failed to find a matching vPort following FDB notification\n"); - return; + goto just_remove; } - - vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport); + vid = 0; /* Override the physical port with the vPort. */ mlxsw_sp_port = mlxsw_sp_vport; } else { vid = fid; } - err = mlxsw_sp_port_fdb_uc_op(mlxsw_sp_port, mac, fid, - adding && mlxsw_sp_port->learning, true); + adding = adding && mlxsw_sp_port->learning; + +do_fdb_op: + err = mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, + adding, true); if (err) { if (net_ratelimit()) netdev_err(mlxsw_sp_port->dev, "Failed to set FDB entry\n"); return; } - mlxsw_sp_fdb_call_notifiers(mlxsw_sp_port->learning, - mlxsw_sp_port->learning_sync, + if (!do_notification) + return; + mlxsw_sp_fdb_call_notifiers(mlxsw_sp_port->learning_sync, adding, mac, vid, mlxsw_sp_port->dev); + return; + +just_remove: + adding = false; + do_notification = false; + goto do_fdb_op; } static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp, @@ -1046,17 +1290,19 @@ static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp, bool adding) { struct mlxsw_sp_port *mlxsw_sp_port; + struct net_device *dev; char mac[ETH_ALEN]; u16 lag_vid = 0; u16 lag_id; u16 vid, fid; + bool do_notification = true; int err; mlxsw_reg_sfn_mac_lag_unpack(sfn_pl, rec_index, mac, &fid, &lag_id); mlxsw_sp_port = mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id); if (!mlxsw_sp_port) { dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Cannot find port representor for LAG\n"); - return; + goto just_remove; } if (mlxsw_sp_fid_is_vfid(fid)) { @@ -1067,30 +1313,40 @@ static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp, vfid); if (!mlxsw_sp_vport) { netdev_err(mlxsw_sp_port->dev, "Failed to find a matching vPort following FDB notification\n"); - return; + goto just_remove; } - vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport); - lag_vid = vid; + lag_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport); + dev = mlxsw_sp_vport->dev; + vid = 0; /* Override the physical port with the vPort. 
*/ mlxsw_sp_port = mlxsw_sp_vport; } else { + dev = mlxsw_sp_lag_get(mlxsw_sp, lag_id)->dev; vid = fid; } + adding = adding && mlxsw_sp_port->learning; + +do_fdb_op: err = mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp, lag_id, mac, fid, lag_vid, - adding && mlxsw_sp_port->learning, - true); + adding, true); if (err) { if (net_ratelimit()) netdev_err(mlxsw_sp_port->dev, "Failed to set FDB entry\n"); return; } - mlxsw_sp_fdb_call_notifiers(mlxsw_sp_port->learning, - mlxsw_sp_port->learning_sync, - adding, mac, vid, - mlxsw_sp_lag_get(mlxsw_sp, lag_id)->dev); + if (!do_notification) + return; + mlxsw_sp_fdb_call_notifiers(mlxsw_sp_port->learning_sync, adding, mac, + vid, dev); + return; + +just_remove: + adding = false; + do_notification = false; + goto do_fdb_op; } static void mlxsw_sp_fdb_notify_rec_process(struct mlxsw_sp *mlxsw_sp, @@ -1136,6 +1392,7 @@ static void mlxsw_sp_fdb_notify_work(struct work_struct *work) mlxsw_sp = container_of(work, struct mlxsw_sp, fdb_notify.dw.work); + rtnl_lock(); do { mlxsw_reg_sfn_pack(sfn_pl); err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl); @@ -1148,6 +1405,7 @@ static void mlxsw_sp_fdb_notify_work(struct work_struct *work) mlxsw_sp_fdb_notify_rec_process(mlxsw_sp, sfn_pl, i); } while (num_rec); + rtnl_unlock(); kfree(sfn_pl); mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp); @@ -1201,7 +1459,8 @@ int mlxsw_sp_port_vlan_init(struct mlxsw_sp_port *mlxsw_sp_port) * with VID 1. */ mlxsw_sp_port->pvid = 1; - err = __mlxsw_sp_port_vlans_del(mlxsw_sp_port, 0, VLAN_N_VID, true); + err = __mlxsw_sp_port_vlans_del(mlxsw_sp_port, 0, VLAN_N_VID - 1, + true); if (err) { netdev_err(dev, "Unable to init VLANs\n"); return err; diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c index a10c928bbd6b..00cfd95ca59d 100644 --- a/drivers/net/ethernet/moxa/moxart_ether.c +++ b/drivers/net/ethernet/moxa/moxart_ether.c @@ -28,6 +28,16 @@ #include "moxart_ether.h" +static inline void moxart_desc_write(u32 data, u32 *desc) +{ + *desc = cpu_to_le32(data); +} + +static inline u32 moxart_desc_read(u32 *desc) +{ + return le32_to_cpu(*desc); +} + static inline void moxart_emac_write(struct net_device *ndev, unsigned int reg, unsigned long value) { @@ -112,7 +122,7 @@ static void moxart_mac_enable(struct net_device *ndev) static void moxart_mac_setup_desc_ring(struct net_device *ndev) { struct moxart_mac_priv_t *priv = netdev_priv(ndev); - void __iomem *desc; + void *desc; int i; for (i = 0; i < TX_DESC_NUM; i++) { @@ -121,7 +131,7 @@ static void moxart_mac_setup_desc_ring(struct net_device *ndev) priv->tx_buf[i] = priv->tx_buf_base + priv->tx_buf_size * i; } - writel(TX_DESC1_END, desc + TX_REG_OFFSET_DESC1); + moxart_desc_write(TX_DESC1_END, desc + TX_REG_OFFSET_DESC1); priv->tx_head = 0; priv->tx_tail = 0; @@ -129,8 +139,8 @@ static void moxart_mac_setup_desc_ring(struct net_device *ndev) for (i = 0; i < RX_DESC_NUM; i++) { desc = priv->rx_desc_base + i * RX_REG_DESC_SIZE; memset(desc, 0, RX_REG_DESC_SIZE); - writel(RX_DESC0_DMA_OWN, desc + RX_REG_OFFSET_DESC0); - writel(RX_BUF_SIZE & RX_DESC1_BUF_SIZE_MASK, + moxart_desc_write(RX_DESC0_DMA_OWN, desc + RX_REG_OFFSET_DESC0); + moxart_desc_write(RX_BUF_SIZE & RX_DESC1_BUF_SIZE_MASK, desc + RX_REG_OFFSET_DESC1); priv->rx_buf[i] = priv->rx_buf_base + priv->rx_buf_size * i; @@ -141,12 +151,12 @@ static void moxart_mac_setup_desc_ring(struct net_device *ndev) if (dma_mapping_error(&ndev->dev, priv->rx_mapping[i])) netdev_err(ndev, "DMA mapping error\n"); - writel(priv->rx_mapping[i], + 
moxart_desc_write(priv->rx_mapping[i], desc + RX_REG_OFFSET_DESC2 + RX_DESC2_ADDRESS_PHYS); - writel(priv->rx_buf[i], + moxart_desc_write((uintptr_t)priv->rx_buf[i], desc + RX_REG_OFFSET_DESC2 + RX_DESC2_ADDRESS_VIRT); } - writel(RX_DESC1_END, desc + RX_REG_OFFSET_DESC1); + moxart_desc_write(RX_DESC1_END, desc + RX_REG_OFFSET_DESC1); priv->rx_head = 0; @@ -201,14 +211,15 @@ static int moxart_rx_poll(struct napi_struct *napi, int budget) napi); struct net_device *ndev = priv->ndev; struct sk_buff *skb; - void __iomem *desc; + void *desc; unsigned int desc0, len; int rx_head = priv->rx_head; int rx = 0; while (rx < budget) { desc = priv->rx_desc_base + (RX_REG_DESC_SIZE * rx_head); - desc0 = readl(desc + RX_REG_OFFSET_DESC0); + desc0 = moxart_desc_read(desc + RX_REG_OFFSET_DESC0); + rmb(); /* ensure desc0 is up to date */ if (desc0 & RX_DESC0_DMA_OWN) break; @@ -250,7 +261,8 @@ static int moxart_rx_poll(struct napi_struct *napi, int budget) priv->stats.multicast++; rx_next: - writel(RX_DESC0_DMA_OWN, desc + RX_REG_OFFSET_DESC0); + wmb(); /* prevent setting ownership back too early */ + moxart_desc_write(RX_DESC0_DMA_OWN, desc + RX_REG_OFFSET_DESC0); rx_head = RX_NEXT(rx_head); priv->rx_head = rx_head; @@ -310,7 +322,7 @@ static irqreturn_t moxart_mac_interrupt(int irq, void *dev_id) static int moxart_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev) { struct moxart_mac_priv_t *priv = netdev_priv(ndev); - void __iomem *desc; + void *desc; unsigned int len; unsigned int tx_head = priv->tx_head; u32 txdes1; @@ -319,11 +331,12 @@ static int moxart_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev) desc = priv->tx_desc_base + (TX_REG_DESC_SIZE * tx_head); spin_lock_irq(&priv->txlock); - if (readl(desc + TX_REG_OFFSET_DESC0) & TX_DESC0_DMA_OWN) { + if (moxart_desc_read(desc + TX_REG_OFFSET_DESC0) & TX_DESC0_DMA_OWN) { net_dbg_ratelimited("no TX space for packet\n"); priv->stats.tx_dropped++; goto out_unlock; } + rmb(); /* ensure data is only read that had TX_DESC0_DMA_OWN cleared */ len = skb->len > TX_BUF_SIZE ? 
TX_BUF_SIZE : skb->len; @@ -337,9 +350,9 @@ static int moxart_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev) priv->tx_len[tx_head] = len; priv->tx_skb[tx_head] = skb; - writel(priv->tx_mapping[tx_head], + moxart_desc_write(priv->tx_mapping[tx_head], desc + TX_REG_OFFSET_DESC2 + TX_DESC2_ADDRESS_PHYS); - writel(skb->data, + moxart_desc_write((uintptr_t)skb->data, desc + TX_REG_OFFSET_DESC2 + TX_DESC2_ADDRESS_VIRT); if (skb->len < ETH_ZLEN) { @@ -354,8 +367,9 @@ static int moxart_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev) txdes1 = TX_DESC1_LTS | TX_DESC1_FTS | (len & TX_DESC1_BUF_SIZE_MASK); if (tx_head == TX_DESC_NUM_MASK) txdes1 |= TX_DESC1_END; - writel(txdes1, desc + TX_REG_OFFSET_DESC1); - writel(TX_DESC0_DMA_OWN, desc + TX_REG_OFFSET_DESC0); + moxart_desc_write(txdes1, desc + TX_REG_OFFSET_DESC1); + wmb(); /* flush descriptor before transferring ownership */ + moxart_desc_write(TX_DESC0_DMA_OWN, desc + TX_REG_OFFSET_DESC0); /* start to send packet */ writel(0xffffffff, priv->base + REG_TX_POLL_DEMAND); diff --git a/drivers/net/ethernet/moxa/moxart_ether.h b/drivers/net/ethernet/moxa/moxart_ether.h index 2be9280d608c..93a9563ac7c6 100644 --- a/drivers/net/ethernet/moxa/moxart_ether.h +++ b/drivers/net/ethernet/moxa/moxart_ether.h @@ -300,7 +300,7 @@ struct moxart_mac_priv_t { dma_addr_t rx_base; dma_addr_t rx_mapping[RX_DESC_NUM]; - void __iomem *rx_desc_base; + void *rx_desc_base; unsigned char *rx_buf_base; unsigned char *rx_buf[RX_DESC_NUM]; unsigned int rx_head; @@ -308,7 +308,7 @@ struct moxart_mac_priv_t { dma_addr_t tx_base; dma_addr_t tx_mapping[TX_DESC_NUM]; - void __iomem *tx_desc_base; + void *tx_desc_base; unsigned char *tx_buf_base; unsigned char *tx_buf[RX_DESC_NUM]; unsigned int tx_head; diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c index 50d5604833ed..e0993eba5df3 100644 --- a/drivers/net/ethernet/neterion/vxge/vxge-main.c +++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c @@ -2223,8 +2223,6 @@ static irqreturn_t vxge_isr_napi(int irq, void *dev_id) return IRQ_NONE; } -#ifdef CONFIG_PCI_MSI - static irqreturn_t vxge_tx_msix_handle(int irq, void *dev_id) { struct vxge_fifo *fifo = (struct vxge_fifo *)dev_id; @@ -2442,16 +2440,13 @@ static void vxge_rem_msix_isr(struct vxgedev *vdev) if (vdev->config.intr_type == MSI_X) pci_disable_msix(vdev->pdev); } -#endif static void vxge_rem_isr(struct vxgedev *vdev) { -#ifdef CONFIG_PCI_MSI - if (vdev->config.intr_type == MSI_X) { + if (IS_ENABLED(CONFIG_PCI_MSI) && + vdev->config.intr_type == MSI_X) { vxge_rem_msix_isr(vdev); - } else -#endif - if (vdev->config.intr_type == INTA) { + } else if (vdev->config.intr_type == INTA) { synchronize_irq(vdev->pdev->irq); free_irq(vdev->pdev->irq, vdev); } @@ -2460,11 +2455,10 @@ static void vxge_rem_isr(struct vxgedev *vdev) static int vxge_add_isr(struct vxgedev *vdev) { int ret = 0; -#ifdef CONFIG_PCI_MSI int vp_idx = 0, intr_idx = 0, intr_cnt = 0, msix_idx = 0, irq_req = 0; int pci_fun = PCI_FUNC(vdev->pdev->devfn); - if (vdev->config.intr_type == MSI_X) + if (IS_ENABLED(CONFIG_PCI_MSI) && vdev->config.intr_type == MSI_X) ret = vxge_enable_msix(vdev); if (ret) { @@ -2475,7 +2469,7 @@ static int vxge_add_isr(struct vxgedev *vdev) vdev->config.intr_type = INTA; } - if (vdev->config.intr_type == MSI_X) { + if (IS_ENABLED(CONFIG_PCI_MSI) && vdev->config.intr_type == MSI_X) { for (intr_idx = 0; intr_idx < (vdev->no_of_vpath * VXGE_HW_VPATH_MSIX_ACTIVE); intr_idx++) { @@ -2576,9 +2570,8 @@ static int 
vxge_add_isr(struct vxgedev *vdev) vdev->vxge_entries[intr_cnt].in_use = 1; vdev->vxge_entries[intr_cnt].arg = &vdev->vpaths[0]; } -INTA_MODE: -#endif +INTA_MODE: if (vdev->config.intr_type == INTA) { snprintf(vdev->desc[0], VXGE_INTR_STRLEN, "%s:vxge:INTA", vdev->ndev->name); @@ -3889,12 +3882,12 @@ static void vxge_device_config_init(struct vxge_hw_device_config *device_config, if (max_mac_vpath > VXGE_MAX_MAC_ADDR_COUNT) max_mac_vpath = VXGE_MAX_MAC_ADDR_COUNT; -#ifndef CONFIG_PCI_MSI - vxge_debug_init(VXGE_ERR, - "%s: This Kernel does not support " - "MSI-X. Defaulting to INTA", VXGE_DRIVER_NAME); - *intr_type = INTA; -#endif + if (!IS_ENABLED(CONFIG_PCI_MSI)) { + vxge_debug_init(VXGE_ERR, + "%s: This Kernel does not support " + "MSI-X. Defaulting to INTA", VXGE_DRIVER_NAME); + *intr_type = INTA; + } /* Configure whether MSI-X or IRQL. */ switch (*intr_type) { diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c index 057665180f13..b1ce7aaa8f8b 100644 --- a/drivers/net/ethernet/nxp/lpc_eth.c +++ b/drivers/net/ethernet/nxp/lpc_eth.c @@ -797,7 +797,7 @@ static int lpc_mii_probe(struct net_device *ndev) netdev_info(ndev, "using MII interface\n"); else netdev_info(ndev, "using RMII interface\n"); - phydev = phy_connect(ndev, dev_name(&phydev->dev), + phydev = phy_connect(ndev, phydev_name(phydev), &lpc_handle_link_change, lpc_phy_interface_mode(&pldat->pdev->dev)); @@ -816,15 +816,14 @@ static int lpc_mii_probe(struct net_device *ndev) pldat->duplex = -1; pldat->phy_dev = phydev; - netdev_info(ndev, - "attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n", - phydev->drv->name, dev_name(&phydev->dev), phydev->irq); + phy_attached_info(phydev); + return 0; } static int lpc_mii_init(struct netdata_local *pldat) { - int err = -ENXIO, i; + int err = -ENXIO; pldat->mii_bus = mdiobus_alloc(); if (!pldat->mii_bus) { @@ -851,19 +850,10 @@ static int lpc_mii_init(struct netdata_local *pldat) pldat->mii_bus->priv = pldat; pldat->mii_bus->parent = &pldat->pdev->dev; - pldat->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); - if (!pldat->mii_bus->irq) { - err = -ENOMEM; - goto err_out_1; - } - - for (i = 0; i < PHY_MAX_ADDR; i++) - pldat->mii_bus->irq[i] = PHY_POLL; - platform_set_drvdata(pldat->pdev, pldat->mii_bus); if (mdiobus_register(pldat->mii_bus)) - goto err_out_free_mdio_irq; + goto err_out_unregister_bus; if (lpc_mii_probe(pldat->ndev) != 0) goto err_out_unregister_bus; @@ -872,9 +862,6 @@ static int lpc_mii_init(struct netdata_local *pldat) err_out_unregister_bus: mdiobus_unregister(pldat->mii_bus); -err_out_free_mdio_irq: - kfree(pldat->mii_bus->irq); -err_out_1: mdiobus_free(pldat->mii_bus); err_out: return err; diff --git a/drivers/net/ethernet/pasemi/Kconfig b/drivers/net/ethernet/pasemi/Kconfig index db19c6f49859..7c92e8306c19 100644 --- a/drivers/net/ethernet/pasemi/Kconfig +++ b/drivers/net/ethernet/pasemi/Kconfig @@ -5,7 +5,7 @@ config NET_VENDOR_PASEMI bool "PA Semi devices" default y - depends on PPC_PASEMI && PCI && INET + depends on PPC_PASEMI && PCI ---help--- If you have a network (Ethernet) card belonging to this class, say Y. @@ -18,9 +18,8 @@ if NET_VENDOR_PASEMI config PASEMI_MAC tristate "PA Semi 1/10Gbit MAC" - depends on PPC_PASEMI && PCI && INET + depends on PPC_PASEMI && PCI select PHYLIB - select INET_LRO ---help--- This driver supports the on-chip 1/10Gbit Ethernet controller on PA Semi's PWRficient line of chips. 
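The Kconfig change above belongs with the driver conversion that follows: pasemi_mac drops its private inet_lro engine in favor of the stack's GRO, so the INET and INET_LRO dependencies are no longer needed. On receive, the whole get_skb_hdr()/lro_receive_skb()/lro_flush_all() machinery reduces to a single napi_gro_receive() call. A minimal sketch of the resulting hand-off, with driver-specific descriptor and skb setup omitted and the function name purely illustrative:

static void example_rx_deliver(struct napi_struct *napi,
			       struct net_device *netdev,
			       struct sk_buff *skb, unsigned int len)
{
	skb_put(skb, len);
	skb->protocol = eth_type_trans(skb, netdev);
	/* GRO aggregates eligible flows inside the core stack, so the
	 * driver keeps no LRO descriptor state and needs no explicit
	 * flush at the end of the poll loop.
	 */
	napi_gro_receive(napi, skb);
}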
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c index 57a6e6cd74fc..af54df52aa6b 100644 --- a/drivers/net/ethernet/pasemi/pasemi_mac.c +++ b/drivers/net/ethernet/pasemi/pasemi_mac.c @@ -30,9 +30,7 @@ #include <linux/skbuff.h> #include <linux/ip.h> -#include <linux/tcp.h> #include <net/checksum.h> -#include <linux/inet_lro.h> #include <linux/prefetch.h> #include <asm/irq.h> @@ -52,12 +50,9 @@ * * - Multicast support * - Large MTU support - * - SW LRO * - Multiqueue RX/TX */ -#define LRO_MAX_AGGR 64 - #define PE_MIN_MTU 64 #define PE_MAX_MTU 9000 #define PE_DEF_MTU ETH_DATA_LEN @@ -257,37 +252,6 @@ static int pasemi_mac_set_mac_addr(struct net_device *dev, void *p) return 0; } -static int get_skb_hdr(struct sk_buff *skb, void **iphdr, - void **tcph, u64 *hdr_flags, void *data) -{ - u64 macrx = (u64) data; - unsigned int ip_len; - struct iphdr *iph; - - /* IPv4 header checksum failed */ - if ((macrx & XCT_MACRX_HTY_M) != XCT_MACRX_HTY_IPV4_OK) - return -1; - - /* non tcp packet */ - skb_reset_network_header(skb); - iph = ip_hdr(skb); - if (iph->protocol != IPPROTO_TCP) - return -1; - - ip_len = ip_hdrlen(skb); - skb_set_transport_header(skb, ip_len); - *tcph = tcp_hdr(skb); - - /* check if ip header and tcp header are complete */ - if (ntohs(iph->tot_len) < ip_len + tcp_hdrlen(skb)) - return -1; - - *hdr_flags = LRO_IPV4 | LRO_TCP; - *iphdr = iph; - - return 0; -} - static int pasemi_mac_unmap_tx_skb(struct pasemi_mac *mac, const int nfrags, struct sk_buff *skb, @@ -817,7 +781,7 @@ static int pasemi_mac_clean_rx(struct pasemi_mac_rxring *rx, skb_put(skb, len-4); skb->protocol = eth_type_trans(skb, mac->netdev); - lro_receive_skb(&mac->lro_mgr, skb, (void *)macrx); + napi_gro_receive(&mac->napi, skb); next: RX_DESC(rx, n) = 0; @@ -839,8 +803,6 @@ next: rx_ring(mac)->next_to_clean = n; - lro_flush_all(&mac->lro_mgr); - /* Increase is in number of 16-byte entries, and since each descriptor * with an 8BRES takes up 3x8 bytes (padded to 4x8), increase with * count*2. 
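To unpack the count*2 in the comment above: a descriptor carrying an 8-byte result entry (8BRES) occupies 3 * 8 = 24 bytes, padded to 4 * 8 = 32 bytes, and the write pointer advances in 16-byte units, so each such descriptor accounts for 32 / 16 = 2 entries; count descriptors therefore advance the pointer by count * 2.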
@@ -1754,16 +1716,6 @@ pasemi_mac_probe(struct pci_dev *pdev, const struct pci_device_id *ent) dev->features = NETIF_F_IP_CSUM | NETIF_F_LLTX | NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_GSO; - mac->lro_mgr.max_aggr = LRO_MAX_AGGR; - mac->lro_mgr.max_desc = MAX_LRO_DESCRIPTORS; - mac->lro_mgr.lro_arr = mac->lro_desc; - mac->lro_mgr.get_skb_header = get_skb_hdr; - mac->lro_mgr.features = LRO_F_NAPI | LRO_F_EXTRACT_VLAN_ID; - mac->lro_mgr.dev = mac->netdev; - mac->lro_mgr.ip_summed = CHECKSUM_UNNECESSARY; - mac->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY; - - mac->dma_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa007, NULL); if (!mac->dma_pdev) { dev_err(&mac->pdev->dev, "Can't find DMA Controller\n"); diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.h b/drivers/net/ethernet/pasemi/pasemi_mac.h index a5807703ab96..161c99a98403 100644 --- a/drivers/net/ethernet/pasemi/pasemi_mac.h +++ b/drivers/net/ethernet/pasemi/pasemi_mac.h @@ -31,7 +31,6 @@ #define CS_RING_SIZE (TX_RING_SIZE*2) -#define MAX_LRO_DESCRIPTORS 8 #define MAX_CS 2 struct pasemi_mac_txring { @@ -84,10 +83,7 @@ struct pasemi_mac { u8 mac_addr[ETH_ALEN]; - struct net_lro_mgr lro_mgr; - struct net_lro_desc lro_desc[MAX_LRO_DESCRIPTORS]; struct timer_list rxtimer; - unsigned int lro_max_aggr; struct pasemi_mac_txring *tx; struct pasemi_mac_rxring *rx; diff --git a/drivers/net/ethernet/pasemi/pasemi_mac_ethtool.c b/drivers/net/ethernet/pasemi/pasemi_mac_ethtool.c index 25fae568261f..f046bfc18e7d 100644 --- a/drivers/net/ethernet/pasemi/pasemi_mac_ethtool.c +++ b/drivers/net/ethernet/pasemi/pasemi_mac_ethtool.c @@ -20,7 +20,6 @@ #include <linux/netdevice.h> #include <linux/ethtool.h> #include <linux/pci.h> -#include <linux/inet_lro.h> #include <asm/pasemi_dma.h> #include "pasemi_mac.h" diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h index 1292c360390c..d34da638b5d5 100644 --- a/drivers/net/ethernet/qlogic/qed/qed.h +++ b/drivers/net/ethernet/qlogic/qed/qed.h @@ -70,8 +70,8 @@ struct qed_sb_sp_info; struct qed_mcp_info; struct qed_rt_data { - u32 init_val; - bool b_valid; + u32 *init_val; + bool *b_valid; }; /* The PCI personality is not quite synonymous to protocol ID: @@ -120,6 +120,10 @@ enum QED_PORT_MODE { QED_PORT_MODE_DE_1X25G }; +enum qed_dev_cap { + QED_DEV_CAP_ETH, +}; + struct qed_hw_info { /* PCI personality */ enum qed_pci_personality personality; @@ -151,6 +155,7 @@ struct qed_hw_info { u32 port_mode; u32 hw_mode; + unsigned long device_capabilities; }; struct qed_hw_cid_data { @@ -267,7 +272,7 @@ struct qed_hwfn { struct qed_hw_info hw_info; /* rt_array (for init-tool) */ - struct qed_rt_data *rt_data; + struct qed_rt_data rt_data; /* SPQ */ struct qed_spq *p_spq; @@ -350,9 +355,20 @@ struct qed_dev { char name[NAME_SIZE]; u8 type; -#define QED_DEV_TYPE_BB_A0 (0 << 0) -#define QED_DEV_TYPE_MASK (0x3) -#define QED_DEV_TYPE_SHIFT (0) +#define QED_DEV_TYPE_BB (0 << 0) +#define QED_DEV_TYPE_AH BIT(0) +/* Translate type/revision combo into the proper conditions */ +#define QED_IS_BB(dev) ((dev)->type == QED_DEV_TYPE_BB) +#define QED_IS_BB_A0(dev) (QED_IS_BB(dev) && \ + CHIP_REV_IS_A0(dev)) +#define QED_IS_BB_B0(dev) (QED_IS_BB(dev) && \ + CHIP_REV_IS_B0(dev)) + +#define QED_GET_TYPE(dev) (QED_IS_BB_A0(dev) ? CHIP_BB_A0 : \ + QED_IS_BB_B0(dev) ? 
CHIP_BB_B0 : CHIP_K2) + + u16 vendor_id; + u16 device_id; u16 chip_num; #define CHIP_NUM_MASK 0xffff @@ -361,6 +377,8 @@ struct qed_dev { u16 chip_rev; #define CHIP_REV_MASK 0xf #define CHIP_REV_SHIFT 12 +#define CHIP_REV_IS_A0(_cdev) (!(_cdev)->chip_rev) +#define CHIP_REV_IS_B0(_cdev) ((_cdev)->chip_rev == 1) u16 chip_metal; #define CHIP_METAL_MASK 0xff @@ -375,10 +393,10 @@ struct qed_dev { u8 num_funcs_in_port; u8 path_id; - enum mf_mode mf_mode; -#define IS_MF(_p_hwfn) (((_p_hwfn)->cdev)->mf_mode != SF) -#define IS_MF_SI(_p_hwfn) (((_p_hwfn)->cdev)->mf_mode == MF_NPAR) -#define IS_MF_SD(_p_hwfn) (((_p_hwfn)->cdev)->mf_mode == MF_OVLAN) + enum qed_mf_mode mf_mode; +#define IS_MF_DEFAULT(_p_hwfn) (((_p_hwfn)->cdev)->mf_mode == QED_MF_DEFAULT) +#define IS_MF_SI(_p_hwfn) (((_p_hwfn)->cdev)->mf_mode == QED_MF_NPAR) +#define IS_MF_SD(_p_hwfn) (((_p_hwfn)->cdev)->mf_mode == QED_MF_OVLAN) int pcie_width; int pcie_speed; @@ -441,11 +459,6 @@ struct qed_dev { const struct firmware *firmware; }; -#define QED_GET_TYPE(dev) (((dev)->type & QED_DEV_TYPE_MASK) >> \ - QED_DEV_TYPE_SHIFT) -#define QED_IS_BB_A0(dev) (QED_GET_TYPE(dev) == QED_DEV_TYPE_BB_A0) -#define QED_IS_BB(dev) (QED_IS_BB_A0(dev)) - #define NUM_OF_SBS(dev) MAX_SB_PER_PATH_BB #define NUM_OF_ENG_PFS(dev) MAX_NUM_PFS_BB diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c index 7ccdb46c6764..d3f7a0215e7e 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c @@ -581,7 +581,8 @@ void qed_qm_init_pf(struct qed_hwfn *p_hwfn) params.num_pf_cids = iids.cids; params.start_pq = qm_info->start_pq; params.num_pf_pqs = qm_info->num_pqs; - params.start_vport = qm_info->num_vports; + params.start_vport = qm_info->start_vport; + params.num_vports = qm_info->num_vports; params.pf_wfq = qm_info->pf_wfq; params.pf_rl = qm_info->pf_rl; params.pq_params = qm_info->qm_pq_params; diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index 817bbd5476ff..bc17ed2c9cac 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -341,11 +341,6 @@ void qed_resc_setup(struct qed_dev *cdev) } } -#define FINAL_CLEANUP_CMD_OFFSET (0) -#define FINAL_CLEANUP_CMD (0x1) -#define FINAL_CLEANUP_VALID_OFFSET (6) -#define FINAL_CLEANUP_VFPF_ID_SHIFT (7) -#define FINAL_CLEANUP_COMP (0x2) #define FINAL_CLEANUP_POLL_CNT (100) #define FINAL_CLEANUP_POLL_TIME (10) int qed_final_cleanup(struct qed_hwfn *p_hwfn, @@ -355,12 +350,14 @@ int qed_final_cleanup(struct qed_hwfn *p_hwfn, u32 command = 0, addr, count = FINAL_CLEANUP_POLL_CNT; int rc = -EBUSY; - addr = GTT_BAR0_MAP_REG_USDM_RAM + USTORM_FLR_FINAL_ACK_OFFSET; + addr = GTT_BAR0_MAP_REG_USDM_RAM + + USTORM_FLR_FINAL_ACK_OFFSET(p_hwfn->rel_pf_id); - command |= FINAL_CLEANUP_CMD << FINAL_CLEANUP_CMD_OFFSET; - command |= 1 << FINAL_CLEANUP_VALID_OFFSET; - command |= id << FINAL_CLEANUP_VFPF_ID_SHIFT; - command |= FINAL_CLEANUP_COMP << SDM_OP_GEN_COMP_TYPE_SHIFT; + command |= X_FINAL_CLEANUP_AGG_INT << + SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT; + command |= 1 << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT; + command |= id << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_SHIFT; + command |= SDM_COMP_TYPE_AGG_INT << SDM_OP_GEN_COMP_TYPE_SHIFT; /* Make sure notification is not set before initiating final cleanup */ if (REG_RD(p_hwfn, addr)) { @@ -415,18 +412,16 @@ static void qed_calc_hw_mode(struct qed_hwfn *p_hwfn) } switch (p_hwfn->cdev->mf_mode) { 
- case SF: - hw_mode |= 1 << MODE_SF; + case QED_MF_DEFAULT: + case QED_MF_NPAR: + hw_mode |= 1 << MODE_MF_SI; break; - case MF_OVLAN: + case QED_MF_OVLAN: hw_mode |= 1 << MODE_MF_SD; break; - case MF_NPAR: - hw_mode |= 1 << MODE_MF_SI; - break; default: - DP_NOTICE(p_hwfn, "Unsupported MF mode, init as SF\n"); - hw_mode |= 1 << MODE_SF; + DP_NOTICE(p_hwfn, "Unsupported MF mode, init as DEFAULT\n"); + hw_mode |= 1 << MODE_MF_SI; } hw_mode |= 1 << MODE_ASIC; @@ -1018,8 +1013,7 @@ static void qed_hw_get_resc(struct qed_hwfn *p_hwfn) u32 *resc_num = p_hwfn->hw_info.resc_num; int num_funcs, i; - num_funcs = IS_MF(p_hwfn) ? MAX_NUM_PFS_BB : p_hwfn->cdev->num_ports_in_engines; + num_funcs = MAX_NUM_PFS_BB; resc_num[QED_SB] = min_t(u32, (MAX_SB_PER_PATH_BB / num_funcs), @@ -1071,7 +1065,7 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg; - u32 port_cfg_addr, link_temp, val, nvm_cfg_addr; + u32 port_cfg_addr, link_temp, nvm_cfg_addr, device_capabilities; struct qed_mcp_link_params *link; /* Read global nvm_cfg address */ @@ -1134,21 +1128,6 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, break; } - addr = MCP_REG_SCRATCH + nvm_cfg1_offset + - offsetof(struct nvm_cfg1, func[MCP_PF_ID(p_hwfn)]) + - offsetof(struct nvm_cfg1_func, device_id); - val = qed_rd(p_hwfn, p_ptt, addr); - - if (IS_MF(p_hwfn)) { - p_hwfn->hw_info.device_id = - (val & NVM_CFG1_FUNC_MF_VENDOR_DEVICE_ID_MASK) >> - NVM_CFG1_FUNC_MF_VENDOR_DEVICE_ID_OFFSET; - } else { - p_hwfn->hw_info.device_id = - (val & NVM_CFG1_FUNC_VENDOR_DEVICE_ID_MASK) >> - NVM_CFG1_FUNC_VENDOR_DEVICE_ID_OFFSET; - } - /* Read default link configuration */ link = &p_hwfn->mcp_info->link_input; port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset + @@ -1220,18 +1199,28 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, switch (mf_mode) { case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED: - p_hwfn->cdev->mf_mode = MF_OVLAN; + p_hwfn->cdev->mf_mode = QED_MF_OVLAN; break; case NVM_CFG1_GLOB_MF_MODE_NPAR1_0: - p_hwfn->cdev->mf_mode = MF_NPAR; + p_hwfn->cdev->mf_mode = QED_MF_NPAR; break; - case NVM_CFG1_GLOB_MF_MODE_FORCED_SF: - p_hwfn->cdev->mf_mode = SF; + case NVM_CFG1_GLOB_MF_MODE_DEFAULT: + p_hwfn->cdev->mf_mode = QED_MF_DEFAULT; break; } DP_INFO(p_hwfn, "Multi function mode is %08x\n", p_hwfn->cdev->mf_mode); + /* Read device capabilities information from shmem */ + addr = MCP_REG_SCRATCH + nvm_cfg1_offset + + offsetof(struct nvm_cfg1, glob) + + offsetof(struct nvm_cfg1_glob, device_capabilities); + + device_capabilities = qed_rd(p_hwfn, p_ptt, addr); + if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET) + __set_bit(QED_DEV_CAP_ETH, + &p_hwfn->hw_info.device_capabilities); + return qed_mcp_fill_shmem_func_info(p_hwfn, p_ptt); } @@ -1293,29 +1282,36 @@ qed_get_hw_info(struct qed_hwfn *p_hwfn, static void qed_get_dev_info(struct qed_dev *cdev) { + struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); u32 tmp; - cdev->chip_num = (u16)qed_rd(cdev->hwfns, cdev->hwfns[0].p_main_ptt, + /* Read Vendor Id / Device Id */ + pci_read_config_word(cdev->pdev, PCI_VENDOR_ID, + &cdev->vendor_id); + pci_read_config_word(cdev->pdev, PCI_DEVICE_ID, + &cdev->device_id); + cdev->chip_num = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt, MISCS_REG_CHIP_NUM); - cdev->chip_rev = (u16)qed_rd(cdev->hwfns, cdev->hwfns[0].p_main_ptt, + cdev->chip_rev = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt, MISCS_REG_CHIP_REV); MASK_FIELD(CHIP_REV, cdev->chip_rev); + cdev->type = QED_DEV_TYPE_BB; /* 
Learn number of HW-functions */ - tmp = qed_rd(cdev->hwfns, cdev->hwfns[0].p_main_ptt, + tmp = qed_rd(p_hwfn, p_hwfn->p_main_ptt, MISCS_REG_CMT_ENABLED_FOR_PAIR); - if (tmp & (1 << cdev->hwfns[0].rel_pf_id)) { + if (tmp & (1 << p_hwfn->rel_pf_id)) { DP_NOTICE(cdev->hwfns, "device in CMT mode\n"); cdev->num_hwfns = 2; } else { cdev->num_hwfns = 1; } - cdev->chip_bond_id = qed_rd(cdev->hwfns, cdev->hwfns[0].p_main_ptt, + cdev->chip_bond_id = qed_rd(p_hwfn, p_hwfn->p_main_ptt, MISCS_REG_CHIP_TEST_REG) >> 4; MASK_FIELD(CHIP_BOND_ID, cdev->chip_bond_id); - cdev->chip_metal = (u16)qed_rd(cdev->hwfns, cdev->hwfns[0].p_main_ptt, + cdev->chip_metal = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt, MISCS_REG_CHIP_METAL); MASK_FIELD(CHIP_METAL, cdev->chip_metal); diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h index 264e954675d1..49bbf696a16d 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h @@ -34,6 +34,8 @@ enum common_event_opcode { COMMON_EVENT_RESERVED3, COMMON_EVENT_RESERVED4, COMMON_EVENT_RESERVED5, + COMMON_EVENT_RESERVED6, + COMMON_EVENT_EMPTY, MAX_COMMON_EVENT_OPCODE }; @@ -45,6 +47,7 @@ enum common_ramrod_cmd_id { COMMON_RAMROD_RESERVED, COMMON_RAMROD_RESERVED2, COMMON_RAMROD_RESERVED3, + COMMON_RAMROD_EMPTY, MAX_COMMON_RAMROD_CMD_ID }; @@ -331,6 +334,179 @@ struct xstorm_core_conn_ag_ctx { __le16 word15 /* word15 */; }; +struct tstorm_core_conn_ag_ctx { + u8 byte0 /* cdu_validation */; + u8 byte1 /* state */; + u8 flags0; +#define TSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */ +#define TSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0 +#define TSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */ +#define TSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1 +#define TSTORM_CORE_CONN_AG_CTX_BIT2_MASK 0x1 /* bit2 */ +#define TSTORM_CORE_CONN_AG_CTX_BIT2_SHIFT 2 +#define TSTORM_CORE_CONN_AG_CTX_BIT3_MASK 0x1 /* bit3 */ +#define TSTORM_CORE_CONN_AG_CTX_BIT3_SHIFT 3 +#define TSTORM_CORE_CONN_AG_CTX_BIT4_MASK 0x1 /* bit4 */ +#define TSTORM_CORE_CONN_AG_CTX_BIT4_SHIFT 4 +#define TSTORM_CORE_CONN_AG_CTX_BIT5_MASK 0x1 /* bit5 */ +#define TSTORM_CORE_CONN_AG_CTX_BIT5_SHIFT 5 +#define TSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */ +#define TSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 6 + u8 flags1; +#define TSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */ +#define TSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 0 +#define TSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */ +#define TSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 2 +#define TSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */ +#define TSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 4 +#define TSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */ +#define TSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 6 + u8 flags2; +#define TSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */ +#define TSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 0 +#define TSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */ +#define TSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 2 +#define TSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */ +#define TSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 4 +#define TSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */ +#define TSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 6 + u8 flags3; +#define TSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */ +#define TSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 0 +#define TSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */ +#define TSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 2 +#define TSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */ +#define TSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 4 +#define TSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */ +#define 
TSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 5 +#define TSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */ +#define TSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 6 +#define TSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */ +#define TSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 7 + u8 flags4; +#define TSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */ +#define TSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 0 +#define TSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */ +#define TSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 1 +#define TSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */ +#define TSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 2 +#define TSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1 /* cf7en */ +#define TSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 3 +#define TSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1 /* cf8en */ +#define TSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 4 +#define TSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1 /* cf9en */ +#define TSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 5 +#define TSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1 /* cf10en */ +#define TSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 6 +#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */ +#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7 + u8 flags5; +#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */ +#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */ +#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */ +#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */ +#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */ +#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */ +#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */ +#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */ +#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7 + __le32 reg0 /* reg0 */; + __le32 reg1 /* reg1 */; + __le32 reg2 /* reg2 */; + __le32 reg3 /* reg3 */; + __le32 reg4 /* reg4 */; + __le32 reg5 /* reg5 */; + __le32 reg6 /* reg6 */; + __le32 reg7 /* reg7 */; + __le32 reg8 /* reg8 */; + u8 byte2 /* byte2 */; + u8 byte3 /* byte3 */; + __le16 word0 /* word0 */; + u8 byte4 /* byte4 */; + u8 byte5 /* byte5 */; + __le16 word1 /* word1 */; + __le16 word2 /* conn_dpi */; + __le16 word3 /* word3 */; + __le32 reg9 /* reg9 */; + __le32 reg10 /* reg10 */; +}; + +struct ustorm_core_conn_ag_ctx { + u8 reserved /* cdu_validation */; + u8 byte1 /* state */; + u8 flags0; +#define USTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */ +#define USTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0 +#define USTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */ +#define USTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1 +#define USTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */ +#define USTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2 +#define USTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */ +#define USTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4 +#define USTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */ +#define USTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6 + u8 flags1; +#define USTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */ +#define USTORM_CORE_CONN_AG_CTX_CF3_SHIFT 0 +#define USTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */ +#define USTORM_CORE_CONN_AG_CTX_CF4_SHIFT 2 +#define USTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */ +#define USTORM_CORE_CONN_AG_CTX_CF5_SHIFT 4 +#define 
USTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */ +#define USTORM_CORE_CONN_AG_CTX_CF6_SHIFT 6 + u8 flags2; +#define USTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */ +#define USTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0 +#define USTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */ +#define USTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1 +#define USTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */ +#define USTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2 +#define USTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */ +#define USTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 3 +#define USTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */ +#define USTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 4 +#define USTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */ +#define USTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 5 +#define USTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */ +#define USTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 6 +#define USTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */ +#define USTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7 + u8 flags3; +#define USTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */ +#define USTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define USTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */ +#define USTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define USTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */ +#define USTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define USTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */ +#define USTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define USTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */ +#define USTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define USTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */ +#define USTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define USTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */ +#define USTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define USTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */ +#define USTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7 + u8 byte2 /* byte2 */; + u8 byte3 /* byte3 */; + __le16 word0 /* conn_dpi */; + __le16 word1 /* word1 */; + __le32 rx_producers /* reg0 */; + __le32 reg1 /* reg1 */; + __le32 reg2 /* reg2 */; + __le32 reg3 /* reg3 */; + __le16 word2 /* word2 */; + __le16 word3 /* word3 */; +}; + /* The core storm context for the Mstorm */ struct mstorm_core_conn_st_ctx { __le32 reserved[24]; @@ -349,8 +525,9 @@ struct core_conn_context { struct regpair pstorm_st_padding[2]; struct xstorm_core_conn_st_ctx xstorm_st_context; struct xstorm_core_conn_ag_ctx xstorm_ag_context; + struct tstorm_core_conn_ag_ctx tstorm_ag_context; + struct ustorm_core_conn_ag_ctx ustorm_ag_context; struct mstorm_core_conn_st_ctx mstorm_st_context; - struct regpair mstorm_st_padding[2]; struct ustorm_core_conn_st_ctx ustorm_st_context; struct regpair ustorm_st_padding[2] /* padding */; }; @@ -397,10 +574,12 @@ union event_ring_element { }; enum personality_type { + BAD_PERSONALITY_TYP, PERSONALITY_RESERVED, PERSONALITY_RESERVED2, PERSONALITY_RDMA_AND_ETH /* Roce or Iwarp */, PERSONALITY_RESERVED3, + PERSONALITY_CORE, PERSONALITY_ETH /* Ethernet */, PERSONALITY_RESERVED4, MAX_PERSONALITY_TYPE @@ -570,7 +749,7 @@ enum block_addr { GRCBASE_NWM = 0x800000, GRCBASE_NWS = 0x700000, GRCBASE_MS = 0x6a0000, - GRCBASE_PHY_PCIE = 0x618000, + GRCBASE_PHY_PCIE = 0x620000, GRCBASE_MISC_AEU = 0x8000, GRCBASE_BAR0_MAP = 0x1c00000, MAX_BLOCK_ADDR @@ -795,13 +974,13 @@ enum init_modes { MODE_RESERVED3, MODE_RESERVED4, MODE_RESERVED5, + MODE_RESERVED6, MODE_SF, MODE_MF_SD, MODE_MF_SI, MODE_PORTS_PER_ENG_1, MODE_PORTS_PER_ENG_2, MODE_PORTS_PER_ENG_4, - MODE_40G, MODE_100G, MODE_EAGLE_ENG1_WORKAROUND, 
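	/* These values are positional: qed_calc_hw_mode() publishes them as
	 * bit positions in hw_mode (e.g. hw_mode |= 1 << MODE_MF_SI), so
	 * adding MODE_RESERVED6 bumps MODE_SF through MODE_PORTS_PER_ENG_4
	 * up by one, while dropping MODE_40G leaves MODE_100G and the
	 * entries after it at their previous values.
	 */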
MAX_INIT_MODES @@ -816,43 +995,6 @@ enum init_phases { MAX_INIT_PHASES }; -struct mstorm_core_conn_ag_ctx { - u8 byte0 /* cdu_validation */; - u8 byte1 /* state */; - u8 flags0; -#define MSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */ -#define MSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0 -#define MSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */ -#define MSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1 -#define MSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* cf0 */ -#define MSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2 -#define MSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* cf1 */ -#define MSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4 -#define MSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* cf2 */ -#define MSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6 - u8 flags1; -#define MSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */ -#define MSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0 -#define MSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */ -#define MSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1 -#define MSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */ -#define MSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2 -#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */ -#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3 -#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */ -#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4 -#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */ -#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5 -#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */ -#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6 -#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */ -#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7 - __le16 word0 /* word0 */; - __le16 word1 /* word1 */; - __le32 reg0 /* reg0 */; - __le32 reg1 /* reg1 */; -}; - /* per encapsulation type enabling flags */ struct prs_reg_encapsulation_type_en { u8 flags; @@ -945,6 +1087,17 @@ struct qm_rf_pq_map { #define QM_RF_PQ_MAP_RESERVED_SHIFT 26 }; +/* Completion params for aggregated interrupt completion */ +struct sdm_agg_int_comp_params { + __le16 params; +#define SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_MASK 0x3F +#define SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT 0 +#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_MASK 0x1 +#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT 6 +#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_MASK 0x1FF +#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_SHIFT 7 +}; + /* SDM operation gen command (generate aggregative interrupt) */ struct sdm_op_gen { __le32 command; @@ -956,223 +1109,6 @@ struct sdm_op_gen { #define SDM_OP_GEN_RESERVED_SHIFT 20 }; -struct tstorm_core_conn_ag_ctx { - u8 byte0 /* cdu_validation */; - u8 byte1 /* state */; - u8 flags0; -#define TSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */ -#define TSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0 -#define TSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */ -#define TSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1 -#define TSTORM_CORE_CONN_AG_CTX_BIT2_MASK 0x1 /* bit2 */ -#define TSTORM_CORE_CONN_AG_CTX_BIT2_SHIFT 2 -#define TSTORM_CORE_CONN_AG_CTX_BIT3_MASK 0x1 /* bit3 */ -#define TSTORM_CORE_CONN_AG_CTX_BIT3_SHIFT 3 -#define TSTORM_CORE_CONN_AG_CTX_BIT4_MASK 0x1 /* bit4 */ -#define TSTORM_CORE_CONN_AG_CTX_BIT4_SHIFT 4 -#define TSTORM_CORE_CONN_AG_CTX_BIT5_MASK 0x1 /* bit5 */ -#define TSTORM_CORE_CONN_AG_CTX_BIT5_SHIFT 5 -#define TSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */ -#define TSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 6 - u8 flags1; -#define TSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */ -#define TSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 0 -#define 
TSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */ -#define TSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 2 -#define TSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */ -#define TSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 4 -#define TSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */ -#define TSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 6 - u8 flags2; -#define TSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */ -#define TSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 0 -#define TSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */ -#define TSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 2 -#define TSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */ -#define TSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 4 -#define TSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */ -#define TSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 6 - u8 flags3; -#define TSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */ -#define TSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 0 -#define TSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */ -#define TSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 2 -#define TSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */ -#define TSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 4 -#define TSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */ -#define TSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 5 -#define TSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */ -#define TSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 6 -#define TSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */ -#define TSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 7 - u8 flags4; -#define TSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */ -#define TSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 0 -#define TSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */ -#define TSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 1 -#define TSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */ -#define TSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 2 -#define TSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1 /* cf7en */ -#define TSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 3 -#define TSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1 /* cf8en */ -#define TSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 4 -#define TSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1 /* cf9en */ -#define TSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 5 -#define TSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1 /* cf10en */ -#define TSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 6 -#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */ -#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7 - u8 flags5; -#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */ -#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0 -#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */ -#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1 -#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */ -#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2 -#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */ -#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3 -#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */ -#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4 -#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */ -#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5 -#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */ -#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6 -#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */ -#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7 - __le32 reg0 /* reg0 */; - __le32 reg1 /* reg1 */; - __le32 reg2 /* reg2 */; - __le32 reg3 /* reg3 */; - __le32 reg4 /* reg4 */; - __le32 reg5 /* reg5 */; - __le32 reg6 /* reg6 */; - __le32 reg7 /* reg7 */; - __le32 reg8 /* reg8 */; - u8 byte2 /* byte2 */; - u8 byte3 /* byte3 */; - __le16 word0 /* word0 */; - u8 byte4 /* byte4 */; - u8 byte5 /* byte5 */; - __le16 word1 /* word1 */; 
- __le16 word2 /* conn_dpi */; - __le16 word3 /* word3 */; - __le32 reg9 /* reg9 */; - __le32 reg10 /* reg10 */; -}; - -struct ustorm_core_conn_ag_ctx { - u8 reserved /* cdu_validation */; - u8 byte1 /* state */; - u8 flags0; -#define USTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */ -#define USTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0 -#define USTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */ -#define USTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1 -#define USTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */ -#define USTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2 -#define USTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */ -#define USTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4 -#define USTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */ -#define USTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6 - u8 flags1; -#define USTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */ -#define USTORM_CORE_CONN_AG_CTX_CF3_SHIFT 0 -#define USTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */ -#define USTORM_CORE_CONN_AG_CTX_CF4_SHIFT 2 -#define USTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */ -#define USTORM_CORE_CONN_AG_CTX_CF5_SHIFT 4 -#define USTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */ -#define USTORM_CORE_CONN_AG_CTX_CF6_SHIFT 6 - u8 flags2; -#define USTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */ -#define USTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0 -#define USTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */ -#define USTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1 -#define USTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */ -#define USTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2 -#define USTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */ -#define USTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 3 -#define USTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */ -#define USTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 4 -#define USTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */ -#define USTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 5 -#define USTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */ -#define USTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 6 -#define USTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */ -#define USTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7 - u8 flags3; -#define USTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */ -#define USTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0 -#define USTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */ -#define USTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1 -#define USTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */ -#define USTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2 -#define USTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */ -#define USTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3 -#define USTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */ -#define USTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4 -#define USTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */ -#define USTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5 -#define USTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */ -#define USTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6 -#define USTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */ -#define USTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7 - u8 byte2 /* byte2 */; - u8 byte3 /* byte3 */; - __le16 word0 /* conn_dpi */; - __le16 word1 /* word1 */; - __le32 rx_producers /* reg0 */; - __le32 reg1 /* reg1 */; - __le32 reg2 /* reg2 */; - __le32 reg3 /* reg3 */; - __le16 word2 /* word2 */; - __le16 word3 /* word3 */; -}; - -struct ystorm_core_conn_ag_ctx { - u8 byte0 /* cdu_validation */; - u8 byte1 /* state */; - u8 flags0; -#define YSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */ -#define YSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0 -#define YSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */ 
-#define YSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1 -#define YSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* cf0 */ -#define YSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2 -#define YSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* cf1 */ -#define YSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4 -#define YSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* cf2 */ -#define YSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6 - u8 flags1; -#define YSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */ -#define YSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0 -#define YSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */ -#define YSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1 -#define YSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */ -#define YSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2 -#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */ -#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3 -#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */ -#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4 -#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */ -#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5 -#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */ -#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6 -#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */ -#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7 - u8 byte2 /* byte2 */; - u8 byte3 /* byte3 */; - __le16 word0 /* word0 */; - __le32 reg0 /* reg0 */; - __le32 reg1 /* reg1 */; - __le16 word1 /* word1 */; - __le16 word2 /* word2 */; - __le16 word3 /* word3 */; - __le16 word4 /* word4 */; - __le32 reg2 /* reg2 */; - __le32 reg3 /* reg3 */; -}; - /*********************************** Init ************************************/ /* Width of GRC address in bits (addresses are specified in dwords) */ @@ -1274,13 +1210,6 @@ enum chip_ids { MAX_CHIP_IDS }; -enum idle_chk_severity_types { - IDLE_CHK_SEVERITY_ERROR /* idle check failure should cause an error */, - IDLE_CHK_SEVERITY_ERROR_NO_TRAFFIC, - IDLE_CHK_SEVERITY_WARNING, - MAX_IDLE_CHK_SEVERITY_TYPES -}; - struct init_array_raw_hdr { __le32 data; #define INIT_ARRAY_RAW_HDR_TYPE_MASK 0xF @@ -1340,14 +1269,6 @@ struct init_callback_op { __le16 block_id /* Blocks ID */; }; -/* init comparison types */ -enum init_comparison_types { - INIT_COMPARISON_EQ /* init value is included in the init command */, - INIT_COMPARISON_OR /* init value is all zeros */, - INIT_COMPARISON_AND /* init value is an array of values */, - MAX_INIT_COMPARISON_TYPES -}; - /* init operation: delay */ struct init_delay_op { __le32 op_data; @@ -1444,12 +1365,10 @@ struct init_read_op { __le32 op_data; #define INIT_READ_OP_OP_MASK 0xF #define INIT_READ_OP_OP_SHIFT 0 -#define INIT_READ_OP_POLL_COMP_MASK 0x7 -#define INIT_READ_OP_POLL_COMP_SHIFT 4 +#define INIT_READ_OP_POLL_TYPE_MASK 0xF +#define INIT_READ_OP_POLL_TYPE_SHIFT 4 #define INIT_READ_OP_RESERVED_MASK 0x1 -#define INIT_READ_OP_RESERVED_SHIFT 7 -#define INIT_READ_OP_POLL_MASK 0x1 -#define INIT_READ_OP_POLL_SHIFT 8 +#define INIT_READ_OP_RESERVED_SHIFT 8 #define INIT_READ_OP_ADDRESS_MASK 0x7FFFFF #define INIT_READ_OP_ADDRESS_SHIFT 9 __le32 expected_val; @@ -1477,6 +1396,14 @@ enum init_op_types { MAX_INIT_OP_TYPES }; +enum init_poll_types { + INIT_POLL_NONE /* No polling */, + INIT_POLL_EQ /* init value is included in the init command */, + INIT_POLL_OR /* init value is all zeros */, + INIT_POLL_AND /* init value is an array of values */, + MAX_INIT_POLL_TYPES +}; + /* init source types */ enum init_source_types { INIT_SRC_INLINE /* init value is included in the init command */, @@ -1677,175 +1604,213 @@ bool qed_send_qm_stop_cmd(struct qed_hwfn 
*p_hwfn, u16 num_pqs); /* Ystorm flow control mode. Use enum fw_flow_ctrl_mode */ -#define YSTORM_FLOW_CONTROL_MODE_OFFSET (IRO[0].base) -#define YSTORM_FLOW_CONTROL_MODE_SIZE (IRO[0].size) +#define YSTORM_FLOW_CONTROL_MODE_OFFSET (IRO[0].base) +#define YSTORM_FLOW_CONTROL_MODE_SIZE (IRO[0].size) /* Tstorm port statistics */ -#define TSTORM_PORT_STAT_OFFSET(port_id) (IRO[1].base + \ - ((port_id) * \ - IRO[1].m1)) -#define TSTORM_PORT_STAT_SIZE (IRO[1].size) +#define TSTORM_PORT_STAT_OFFSET(port_id) (IRO[1].base + ((port_id) * IRO[1].m1)) +#define TSTORM_PORT_STAT_SIZE (IRO[1].size) +/* Tstorm ll2 port statistics */ +#define TSTORM_LL2_PORT_STAT_OFFSET(port_id) \ + (IRO[2].base + ((port_id) * IRO[2].m1)) +#define TSTORM_LL2_PORT_STAT_SIZE (IRO[2].size) /* Ustorm VF-PF Channel ready flag */ -#define USTORM_VF_PF_CHANNEL_READY_OFFSET(vf_id) (IRO[2].base + \ - ((vf_id) * \ - IRO[2].m1)) -#define USTORM_VF_PF_CHANNEL_READY_SIZE (IRO[2].size) +#define USTORM_VF_PF_CHANNEL_READY_OFFSET(vf_id) \ + (IRO[3].base + ((vf_id) * IRO[3].m1)) +#define USTORM_VF_PF_CHANNEL_READY_SIZE (IRO[3].size) /* Ustorm Final flr cleanup ack */ -#define USTORM_FLR_FINAL_ACK_OFFSET (IRO[3].base) -#define USTORM_FLR_FINAL_ACK_SIZE (IRO[3].size) +#define USTORM_FLR_FINAL_ACK_OFFSET(pf_id) (IRO[4].base + ((pf_id) * IRO[4].m1)) +#define USTORM_FLR_FINAL_ACK_SIZE (IRO[4].size) /* Ustorm Event ring consumer */ -#define USTORM_EQE_CONS_OFFSET(pf_id) (IRO[4].base + \ - ((pf_id) * \ - IRO[4].m1)) -#define USTORM_EQE_CONS_SIZE (IRO[4].size) -/* Ustorm Completion ring consumer */ -#define USTORM_CQ_CONS_OFFSET(global_queue_id) (IRO[5].base + \ - ((global_queue_id) * \ - IRO[5].m1)) -#define USTORM_CQ_CONS_SIZE (IRO[5].size) +#define USTORM_EQE_CONS_OFFSET(pf_id) (IRO[5].base + ((pf_id) * IRO[5].m1)) +#define USTORM_EQE_CONS_SIZE (IRO[5].size) +/* Ustorm Common Queue ring consumer */ +#define USTORM_COMMON_QUEUE_CONS_OFFSET(global_queue_id) \ + (IRO[6].base + ((global_queue_id) * IRO[6].m1)) +#define USTORM_COMMON_QUEUE_CONS_SIZE (IRO[6].size) /* Xstorm Integration Test Data */ -#define XSTORM_INTEG_TEST_DATA_OFFSET (IRO[6].base) -#define XSTORM_INTEG_TEST_DATA_SIZE (IRO[6].size) +#define XSTORM_INTEG_TEST_DATA_OFFSET (IRO[7].base) +#define XSTORM_INTEG_TEST_DATA_SIZE (IRO[7].size) /* Ystorm Integration Test Data */ -#define YSTORM_INTEG_TEST_DATA_OFFSET (IRO[7].base) -#define YSTORM_INTEG_TEST_DATA_SIZE (IRO[7].size) +#define YSTORM_INTEG_TEST_DATA_OFFSET (IRO[8].base) +#define YSTORM_INTEG_TEST_DATA_SIZE (IRO[8].size) /* Pstorm Integration Test Data */ -#define PSTORM_INTEG_TEST_DATA_OFFSET (IRO[8].base) -#define PSTORM_INTEG_TEST_DATA_SIZE (IRO[8].size) +#define PSTORM_INTEG_TEST_DATA_OFFSET (IRO[9].base) +#define PSTORM_INTEG_TEST_DATA_SIZE (IRO[9].size) /* Tstorm Integration Test Data */ -#define TSTORM_INTEG_TEST_DATA_OFFSET (IRO[9].base) -#define TSTORM_INTEG_TEST_DATA_SIZE (IRO[9].size) +#define TSTORM_INTEG_TEST_DATA_OFFSET (IRO[10].base) +#define TSTORM_INTEG_TEST_DATA_SIZE (IRO[10].size) /* Mstorm Integration Test Data */ -#define MSTORM_INTEG_TEST_DATA_OFFSET (IRO[10].base) -#define MSTORM_INTEG_TEST_DATA_SIZE (IRO[10].size) +#define MSTORM_INTEG_TEST_DATA_OFFSET (IRO[11].base) +#define MSTORM_INTEG_TEST_DATA_SIZE (IRO[11].size) /* Ustorm Integration Test Data */ -#define USTORM_INTEG_TEST_DATA_OFFSET (IRO[11].base) -#define USTORM_INTEG_TEST_DATA_SIZE (IRO[11].size) +#define USTORM_INTEG_TEST_DATA_OFFSET (IRO[12].base) +#define USTORM_INTEG_TEST_DATA_SIZE (IRO[12].size) /* Tstorm producers */ -#define 
TSTORM_LL2_RX_PRODS_OFFSET(core_rx_queue_id) (IRO[12].base + \ - ((core_rx_queue_id) * \ - IRO[12].m1)) -#define TSTORM_LL2_RX_PRODS_SIZE (IRO[12].size) -/* Tstorm LiteL2 queue statistics */ -#define CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(core_rx_q_id) (IRO[13].base + \ - ((core_rx_q_id) * \ - IRO[13].m1)) -#define CORE_LL2_TSTORM_PER_QUEUE_STAT_SIZE (IRO[13].size) +#define TSTORM_LL2_RX_PRODS_OFFSET(core_rx_queue_id) \ + (IRO[13].base + ((core_rx_queue_id) * IRO[13].m1)) +#define TSTORM_LL2_RX_PRODS_SIZE (IRO[13].size) +/* Tstorm LightL2 queue statistics */ +#define CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) \ + (IRO[14].base + ((core_rx_queue_id) * IRO[14].m1)) +#define CORE_LL2_TSTORM_PER_QUEUE_STAT_SIZE (IRO[14].size) /* Ustorm LiteL2 queue statistics */ -#define CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(core_rx_q_id) (IRO[14].base + \ - ((core_rx_q_id) * \ - IRO[14].m1)) -#define CORE_LL2_USTORM_PER_QUEUE_STAT_SIZE (IRO[14].size) +#define CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) \ + (IRO[15].base + ((core_rx_queue_id) * IRO[15].m1)) +#define CORE_LL2_USTORM_PER_QUEUE_STAT_SIZE (IRO[15].size) /* Pstorm LiteL2 queue statistics */ -#define CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(core_txst_id) (IRO[15].base + \ - ((core_txst_id) * \ - IRO[15].m1)) -#define CORE_LL2_PSTORM_PER_QUEUE_STAT_SIZE (IRO[15].size) +#define CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(core_tx_stats_id) \ + (IRO[16].base + ((core_tx_stats_id) * IRO[16].m1)) +#define CORE_LL2_PSTORM_PER_QUEUE_STAT_SIZE (IRO[16].size) /* Mstorm queue statistics */ -#define MSTORM_QUEUE_STAT_OFFSET(stat_counter_id) (IRO[16].base + \ - ((stat_counter_id) * \ - IRO[16].m1)) -#define MSTORM_QUEUE_STAT_SIZE (IRO[16].size) +#define MSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \ + (IRO[17].base + ((stat_counter_id) * IRO[17].m1)) +#define MSTORM_QUEUE_STAT_SIZE (IRO[17].size) /* Mstorm producers */ -#define MSTORM_PRODS_OFFSET(queue_id) (IRO[17].base + \ - ((queue_id) * \ - IRO[17].m1)) -#define MSTORM_PRODS_SIZE (IRO[17].size) +#define MSTORM_PRODS_OFFSET(queue_id) (IRO[18].base + ((queue_id) * IRO[18].m1)) +#define MSTORM_PRODS_SIZE (IRO[18].size) /* TPA aggregation timeout in us resolution (on ASIC) */ -#define MSTORM_TPA_TIMEOUT_US_OFFSET (IRO[18].base) -#define MSTORM_TPA_TIMEOUT_US_SIZE (IRO[18].size) +#define MSTORM_TPA_TIMEOUT_US_OFFSET (IRO[19].base) +#define MSTORM_TPA_TIMEOUT_US_SIZE (IRO[19].size) /* Ustorm queue statistics */ -#define USTORM_QUEUE_STAT_OFFSET(stat_counter_id) (IRO[19].base + \ - ((stat_counter_id) * \ - IRO[19].m1)) -#define USTORM_QUEUE_STAT_SIZE (IRO[19].size) +#define USTORM_QUEUE_STAT_OFFSET(stat_counter_id) \ + (IRO[20].base + ((stat_counter_id) * IRO[20].m1)) +#define USTORM_QUEUE_STAT_SIZE (IRO[20].size) /* Ustorm queue zone */ -#define USTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) (IRO[20].base + \ - ((queue_id) * \ - IRO[20].m1)) -#define USTORM_ETH_QUEUE_ZONE_SIZE (IRO[20].size) +#define USTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) \ + (IRO[21].base + ((queue_id) * IRO[21].m1)) +#define USTORM_ETH_QUEUE_ZONE_SIZE (IRO[21].size) /* Pstorm queue statistics */ -#define PSTORM_QUEUE_STAT_OFFSET(stat_counter_id) (IRO[21].base + \ - ((stat_counter_id) * \ - IRO[21].m1)) -#define PSTORM_QUEUE_STAT_SIZE (IRO[21].size) +#define PSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \ + (IRO[22].base + ((stat_counter_id) * IRO[22].m1)) +#define PSTORM_QUEUE_STAT_SIZE (IRO[22].size) /* Tstorm last parser message */ -#define TSTORM_ETH_PRS_INPUT_OFFSET(pf_id) (IRO[22].base + \ - ((pf_id) * \ - IRO[22].m1)) -#define 
TSTORM_ETH_PRS_INPUT_SIZE (IRO[22].size) +#define TSTORM_ETH_PRS_INPUT_OFFSET (IRO[23].base) +#define TSTORM_ETH_PRS_INPUT_SIZE (IRO[23].size) +/* Tstorm Eth limit Rx rate */ +#define ETH_RX_RATE_LIMIT_OFFSET(pf_id) (IRO[24].base + ((pf_id) * IRO[24].m1)) +#define ETH_RX_RATE_LIMIT_SIZE (IRO[24].size) /* Ystorm queue zone */ -#define YSTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) (IRO[23].base + \ - ((queue_id) * \ - IRO[23].m1)) -#define YSTORM_ETH_QUEUE_ZONE_SIZE (IRO[23].size) +#define YSTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) \ + (IRO[25].base + ((queue_id) * IRO[25].m1)) +#define YSTORM_ETH_QUEUE_ZONE_SIZE (IRO[25].size) /* Ystorm cqe producer */ -#define YSTORM_TOE_CQ_PROD_OFFSET(rss_id) (IRO[24].base + \ - ((rss_id) * \ - IRO[24].m1)) -#define YSTORM_TOE_CQ_PROD_SIZE (IRO[24].size) +#define YSTORM_TOE_CQ_PROD_OFFSET(rss_id) \ + (IRO[26].base + ((rss_id) * IRO[26].m1)) +#define YSTORM_TOE_CQ_PROD_SIZE (IRO[26].size) /* Ustorm cqe producer */ -#define USTORM_TOE_CQ_PROD_OFFSET(rss_id) (IRO[25].base + \ - ((rss_id) * \ - IRO[25].m1)) -#define USTORM_TOE_CQ_PROD_SIZE (IRO[25].size) +#define USTORM_TOE_CQ_PROD_OFFSET(rss_id) \ + (IRO[27].base + ((rss_id) * IRO[27].m1)) +#define USTORM_TOE_CQ_PROD_SIZE (IRO[27].size) /* Ustorm grq producer */ -#define USTORM_TOE_GRQ_PROD_OFFSET(pf_id) (IRO[26].base + \ - ((pf_id) * \ - IRO[26].m1)) -#define USTORM_TOE_GRQ_PROD_SIZE (IRO[26].size) +#define USTORM_TOE_GRQ_PROD_OFFSET(pf_id) \ + (IRO[28].base + ((pf_id) * IRO[28].m1)) +#define USTORM_TOE_GRQ_PROD_SIZE (IRO[28].size) /* Tstorm cmdq-cons of given command queue-id */ -#define TSTORM_SCSI_CMDQ_CONS_OFFSET(cmdq_queue_id) (IRO[27].base + \ - ((cmdq_queue_id) * \ - IRO[27].m1)) -#define TSTORM_SCSI_CMDQ_CONS_SIZE (IRO[27].size) +#define TSTORM_SCSI_CMDQ_CONS_OFFSET(cmdq_queue_id) \ + (IRO[29].base + ((cmdq_queue_id) * IRO[29].m1)) +#define TSTORM_SCSI_CMDQ_CONS_SIZE (IRO[29].size) /* Mstorm rq-cons of given queue-id */ -#define MSTORM_SCSI_RQ_CONS_OFFSET(rq_queue_id) (IRO[28].base + \ - ((rq_queue_id) * \ - IRO[28].m1)) -#define MSTORM_SCSI_RQ_CONS_SIZE (IRO[28].size) +#define MSTORM_SCSI_RQ_CONS_OFFSET(rq_queue_id) \ + (IRO[30].base + ((rq_queue_id) * IRO[30].m1)) +#define MSTORM_SCSI_RQ_CONS_SIZE (IRO[30].size) +/* Mstorm bdq-external-producer of given BDQ function ID, BDqueue-id */ +#define MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id, bdq_id) \ + (IRO[31].base + ((func_id) * IRO[31].m1) + ((bdq_id) * IRO[31].m2)) +#define MSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[31].size) +/* Tstorm (reflects M-Storm) bdq-external-producer of given fn ID, BDqueue-id */ +#define TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id, bdq_id) \ + (IRO[32].base + ((func_id) * IRO[32].m1) + ((bdq_id) * IRO[32].m2)) +#define TSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[32].size) +/* Tstorm iSCSI RX stats */ +#define TSTORM_ISCSI_RX_STATS_OFFSET(pf_id) \ + (IRO[33].base + ((pf_id) * IRO[33].m1)) +#define TSTORM_ISCSI_RX_STATS_SIZE (IRO[33].size) +/* Mstorm iSCSI RX stats */ +#define MSTORM_ISCSI_RX_STATS_OFFSET(pf_id) \ + (IRO[34].base + ((pf_id) * IRO[34].m1)) +#define MSTORM_ISCSI_RX_STATS_SIZE (IRO[34].size) +/* Ustorm iSCSI RX stats */ +#define USTORM_ISCSI_RX_STATS_OFFSET(pf_id) \ + (IRO[35].base + ((pf_id) * IRO[35].m1)) +#define USTORM_ISCSI_RX_STATS_SIZE (IRO[35].size) +/* Xstorm iSCSI TX stats */ +#define XSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \ + (IRO[36].base + ((pf_id) * IRO[36].m1)) +#define XSTORM_ISCSI_TX_STATS_SIZE (IRO[36].size) +/* Ystorm iSCSI TX stats */ +#define YSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \ + (IRO[37].base + ((pf_id) * IRO[37].m1)) 
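/* A minimal sketch of the addressing scheme behind these accessors,
 * assuming the struct below mirrors 'struct iro' (the element type of
 * iro_arr); the iro_sketch names are hypothetical, for illustration
 * only. Each *_OFFSET() macro computes a storm-RAM address as
 * base + index * m1, plus a second index * m2 for two-dimensional
 * regions such as MSTORM_SCSI_BDQ_EXT_PROD_OFFSET().
 */
struct iro_sketch {
	u32 base;	/* region base address in storm RAM */
	u16 m1;		/* stride of the first index */
	u16 m2;		/* stride of the second index, 0 if unused */
	u16 m3;		/* stride of a third index, 0 if unused */
	u16 size;	/* size of one element */
};
#define IRO_SKETCH_OFFSET_1D(iro, i)	((iro).base + (i) * (iro).m1)
#define IRO_SKETCH_OFFSET_2D(iro, i, j) \
	((iro).base + (i) * (iro).m1 + (j) * (iro).m2)
/* e.g. USTORM_FLR_FINAL_ACK_OFFSET(pf_id) expands to
 * IRO[4].base + (pf_id) * IRO[4].m1, i.e.
 * IRO_SKETCH_OFFSET_1D(IRO[4], pf_id).
 */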
+#define YSTORM_ISCSI_TX_STATS_SIZE (IRO[37].size) +/* Pstorm iSCSI TX stats */ +#define PSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \ + (IRO[38].base + ((pf_id) * IRO[38].m1)) +#define PSTORM_ISCSI_TX_STATS_SIZE (IRO[38].size) +/* Tstorm FCoE RX stats */ +#define TSTORM_FCOE_RX_STATS_OFFSET(pf_id) \ + (IRO[39].base + ((pf_id) * IRO[39].m1)) +#define TSTORM_FCOE_RX_STATS_SIZE (IRO[39].size) +/* Mstorm FCoE RX stats */ +#define MSTORM_FCOE_RX_STATS_OFFSET(pf_id) \ + (IRO[40].base + ((pf_id) * IRO[40].m1)) +#define MSTORM_FCOE_RX_STATS_SIZE (IRO[40].size) +/* Pstorm FCoE TX stats */ +#define PSTORM_FCOE_TX_STATS_OFFSET(pf_id) \ + (IRO[41].base + ((pf_id) * IRO[41].m1)) +#define PSTORM_FCOE_TX_STATS_SIZE (IRO[41].size) /* Pstorm RoCE statistics */ -#define PSTORM_ROCE_STAT_OFFSET(stat_counter_id) (IRO[29].base + \ - ((stat_counter_id) * \ - IRO[29].m1)) -#define PSTORM_ROCE_STAT_SIZE (IRO[29].size) +#define PSTORM_ROCE_STAT_OFFSET(stat_counter_id) \ + (IRO[42].base + ((stat_counter_id) * IRO[42].m1)) +#define PSTORM_ROCE_STAT_SIZE (IRO[42].size) /* Tstorm RoCE statistics */ -#define TSTORM_ROCE_STAT_OFFSET(stat_counter_id) (IRO[30].base + \ - ((stat_counter_id) * \ - IRO[30].m1)) -#define TSTORM_ROCE_STAT_SIZE (IRO[30].size) - -static const struct iro iro_arr[31] = { - { 0x10, 0x0, 0x0, 0x0, 0x8 }, - { 0x4448, 0x60, 0x0, 0x0, 0x60 }, - { 0x498, 0x8, 0x0, 0x0, 0x4 }, - { 0x494, 0x0, 0x0, 0x0, 0x4 }, - { 0x10, 0x8, 0x0, 0x0, 0x2 }, - { 0x90, 0x8, 0x0, 0x0, 0x2 }, - { 0x4540, 0x0, 0x0, 0x0, 0xf8 }, - { 0x39e0, 0x0, 0x0, 0x0, 0xf8 }, - { 0x2598, 0x0, 0x0, 0x0, 0xf8 }, - { 0x4350, 0x0, 0x0, 0x0, 0xf8 }, - { 0x52d0, 0x0, 0x0, 0x0, 0xf8 }, - { 0x7a48, 0x0, 0x0, 0x0, 0xf8 }, - { 0x100, 0x8, 0x0, 0x0, 0x8 }, - { 0x5808, 0x10, 0x0, 0x0, 0x10 }, - { 0xb100, 0x30, 0x0, 0x0, 0x30 }, - { 0x95c0, 0x30, 0x0, 0x0, 0x30 }, - { 0x54f8, 0x40, 0x0, 0x0, 0x40 }, - { 0x200, 0x10, 0x0, 0x0, 0x8 }, - { 0x9e70, 0x0, 0x0, 0x0, 0x4 }, - { 0x7ca0, 0x40, 0x0, 0x0, 0x30 }, - { 0xd00, 0x8, 0x0, 0x0, 0x8 }, - { 0x2790, 0x80, 0x0, 0x0, 0x38 }, - { 0xa520, 0xf0, 0x0, 0x0, 0xf0 }, - { 0x80, 0x8, 0x0, 0x0, 0x8 }, - { 0xac0, 0x8, 0x0, 0x0, 0x8 }, - { 0x2580, 0x8, 0x0, 0x0, 0x8 }, - { 0x2500, 0x8, 0x0, 0x0, 0x8 }, - { 0x440, 0x8, 0x0, 0x0, 0x2 }, - { 0x1800, 0x8, 0x0, 0x0, 0x2 }, - { 0x27c8, 0x80, 0x0, 0x0, 0x10 }, - { 0x4710, 0x10, 0x0, 0x0, 0x10 }, +#define TSTORM_ROCE_STAT_OFFSET(stat_counter_id) \ + (IRO[43].base + ((stat_counter_id) * IRO[43].m1)) +#define TSTORM_ROCE_STAT_SIZE (IRO[43].size) + +static const struct iro iro_arr[44] = { + { 0x10, 0x0, 0x0, 0x0, 0x8 }, + { 0x47c8, 0x60, 0x0, 0x0, 0x60 }, + { 0x5e30, 0x20, 0x0, 0x0, 0x20 }, + { 0x510, 0x8, 0x0, 0x0, 0x4 }, + { 0x490, 0x8, 0x0, 0x0, 0x4 }, + { 0x10, 0x8, 0x0, 0x0, 0x2 }, + { 0x90, 0x8, 0x0, 0x0, 0x2 }, + { 0x4940, 0x0, 0x0, 0x0, 0x78 }, + { 0x3de0, 0x0, 0x0, 0x0, 0x78 }, + { 0x2998, 0x0, 0x0, 0x0, 0x78 }, + { 0x4750, 0x0, 0x0, 0x0, 0x78 }, + { 0x56d0, 0x0, 0x0, 0x0, 0x78 }, + { 0x7e50, 0x0, 0x0, 0x0, 0x78 }, + { 0x100, 0x8, 0x0, 0x0, 0x8 }, + { 0x5c10, 0x10, 0x0, 0x0, 0x10 }, + { 0xb508, 0x30, 0x0, 0x0, 0x30 }, + { 0x95c0, 0x30, 0x0, 0x0, 0x30 }, + { 0x58a0, 0x40, 0x0, 0x0, 0x40 }, + { 0x200, 0x10, 0x0, 0x0, 0x8 }, + { 0xa230, 0x0, 0x0, 0x0, 0x4 }, + { 0x8058, 0x40, 0x0, 0x0, 0x30 }, + { 0xd00, 0x8, 0x0, 0x0, 0x8 }, + { 0x2b30, 0x80, 0x0, 0x0, 0x38 }, + { 0xa808, 0x0, 0x0, 0x0, 0xf0 }, + { 0xa8f8, 0x8, 0x0, 0x0, 0x8 }, + { 0x80, 0x8, 0x0, 0x0, 0x8 }, + { 0xac0, 0x8, 0x0, 0x0, 0x8 }, + { 0x2580, 0x8, 0x0, 0x0, 0x8 }, + { 0x2500, 0x8, 0x0, 0x0, 0x8 }, + { 0x440, 0x8, 0x0, 
0x0, 0x2 }, + { 0x1800, 0x8, 0x0, 0x0, 0x2 }, + { 0x1a00, 0x10, 0x8, 0x0, 0x2 }, + { 0x640, 0x10, 0x8, 0x0, 0x2 }, + { 0xd9b8, 0x38, 0x0, 0x0, 0x24 }, + { 0x11048, 0x10, 0x0, 0x0, 0x8 }, + { 0x11678, 0x38, 0x0, 0x0, 0x18 }, + { 0xaec0, 0x30, 0x0, 0x0, 0x10 }, + { 0x8700, 0x28, 0x0, 0x0, 0x18 }, + { 0xec00, 0x10, 0x0, 0x0, 0x10 }, + { 0xde38, 0x40, 0x0, 0x0, 0x30 }, + { 0x121a8, 0x38, 0x0, 0x0, 0x8 }, + { 0xf068, 0x20, 0x0, 0x0, 0x20 }, + { 0x2b68, 0x80, 0x0, 0x0, 0x10 }, + { 0x4ab8, 0x10, 0x0, 0x0, 0x10 }, }; /* Runtime array offsets */ @@ -1866,426 +1831,427 @@ static const struct iro iro_arr[31] = { #define DORQ_REG_VF_MAX_ICID_6_RT_OFFSET 14 #define DORQ_REG_VF_MAX_ICID_7_RT_OFFSET 15 #define DORQ_REG_PF_WAKE_ALL_RT_OFFSET 16 -#define IGU_REG_PF_CONFIGURATION_RT_OFFSET 17 -#define IGU_REG_VF_CONFIGURATION_RT_OFFSET 18 -#define IGU_REG_ATTN_MSG_ADDR_L_RT_OFFSET 19 -#define IGU_REG_ATTN_MSG_ADDR_H_RT_OFFSET 20 -#define IGU_REG_LEADING_EDGE_LATCH_RT_OFFSET 21 -#define IGU_REG_TRAILING_EDGE_LATCH_RT_OFFSET 22 -#define CAU_REG_CQE_AGG_UNIT_SIZE_RT_OFFSET 23 -#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 760 +#define DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET 17 +#define IGU_REG_PF_CONFIGURATION_RT_OFFSET 18 +#define IGU_REG_VF_CONFIGURATION_RT_OFFSET 19 +#define IGU_REG_ATTN_MSG_ADDR_L_RT_OFFSET 20 +#define IGU_REG_ATTN_MSG_ADDR_H_RT_OFFSET 21 +#define IGU_REG_LEADING_EDGE_LATCH_RT_OFFSET 22 +#define IGU_REG_TRAILING_EDGE_LATCH_RT_OFFSET 23 +#define CAU_REG_CQE_AGG_UNIT_SIZE_RT_OFFSET 24 +#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 761 #define CAU_REG_SB_VAR_MEMORY_RT_SIZE 736 -#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 760 +#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 761 #define CAU_REG_SB_VAR_MEMORY_RT_SIZE 736 -#define CAU_REG_SB_ADDR_MEMORY_RT_OFFSET 1496 +#define CAU_REG_SB_ADDR_MEMORY_RT_OFFSET 1497 #define CAU_REG_SB_ADDR_MEMORY_RT_SIZE 736 -#define CAU_REG_PI_MEMORY_RT_OFFSET 2232 +#define CAU_REG_PI_MEMORY_RT_OFFSET 2233 #define CAU_REG_PI_MEMORY_RT_SIZE 4416 -#define PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET 6648 -#define PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET 6649 -#define PRS_REG_TASK_ID_MAX_INITIATOR_VF_RT_OFFSET 6650 -#define PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET 6651 -#define PRS_REG_TASK_ID_MAX_TARGET_VF_RT_OFFSET 6652 -#define PRS_REG_SEARCH_TCP_RT_OFFSET 6653 -#define PRS_REG_SEARCH_FCOE_RT_OFFSET 6654 -#define PRS_REG_SEARCH_ROCE_RT_OFFSET 6655 -#define PRS_REG_ROCE_DEST_QP_MAX_VF_RT_OFFSET 6656 -#define PRS_REG_ROCE_DEST_QP_MAX_PF_RT_OFFSET 6657 -#define PRS_REG_SEARCH_OPENFLOW_RT_OFFSET 6658 -#define PRS_REG_SEARCH_NON_IP_AS_OPENFLOW_RT_OFFSET 6659 -#define PRS_REG_OPENFLOW_SUPPORT_ONLY_KNOWN_OVER_IP_RT_OFFSET 6660 -#define PRS_REG_OPENFLOW_SEARCH_KEY_MASK_RT_OFFSET 6661 -#define PRS_REG_LIGHT_L2_ETHERTYPE_EN_RT_OFFSET 6662 -#define SRC_REG_FIRSTFREE_RT_OFFSET 6663 +#define PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET 6649 +#define PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET 6650 +#define PRS_REG_TASK_ID_MAX_INITIATOR_VF_RT_OFFSET 6651 +#define PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET 6652 +#define PRS_REG_TASK_ID_MAX_TARGET_VF_RT_OFFSET 6653 +#define PRS_REG_SEARCH_TCP_RT_OFFSET 6654 +#define PRS_REG_SEARCH_FCOE_RT_OFFSET 6655 +#define PRS_REG_SEARCH_ROCE_RT_OFFSET 6656 +#define PRS_REG_ROCE_DEST_QP_MAX_VF_RT_OFFSET 6657 +#define PRS_REG_ROCE_DEST_QP_MAX_PF_RT_OFFSET 6658 +#define PRS_REG_SEARCH_OPENFLOW_RT_OFFSET 6659 +#define PRS_REG_SEARCH_NON_IP_AS_OPENFLOW_RT_OFFSET 6660 +#define PRS_REG_OPENFLOW_SUPPORT_ONLY_KNOWN_OVER_IP_RT_OFFSET 6661 +#define 
PRS_REG_OPENFLOW_SEARCH_KEY_MASK_RT_OFFSET 6662 +#define PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET 6663 +#define PRS_REG_LIGHT_L2_ETHERTYPE_EN_RT_OFFSET 6664 +#define SRC_REG_FIRSTFREE_RT_OFFSET 6665 #define SRC_REG_FIRSTFREE_RT_SIZE 2 -#define SRC_REG_LASTFREE_RT_OFFSET 6665 +#define SRC_REG_LASTFREE_RT_OFFSET 6667 #define SRC_REG_LASTFREE_RT_SIZE 2 -#define SRC_REG_COUNTFREE_RT_OFFSET 6667 -#define SRC_REG_NUMBER_HASH_BITS_RT_OFFSET 6668 -#define PSWRQ2_REG_CDUT_P_SIZE_RT_OFFSET 6669 -#define PSWRQ2_REG_CDUC_P_SIZE_RT_OFFSET 6670 -#define PSWRQ2_REG_TM_P_SIZE_RT_OFFSET 6671 -#define PSWRQ2_REG_QM_P_SIZE_RT_OFFSET 6672 -#define PSWRQ2_REG_SRC_P_SIZE_RT_OFFSET 6673 -#define PSWRQ2_REG_TM_FIRST_ILT_RT_OFFSET 6674 -#define PSWRQ2_REG_TM_LAST_ILT_RT_OFFSET 6675 -#define PSWRQ2_REG_QM_FIRST_ILT_RT_OFFSET 6676 -#define PSWRQ2_REG_QM_LAST_ILT_RT_OFFSET 6677 -#define PSWRQ2_REG_SRC_FIRST_ILT_RT_OFFSET 6678 -#define PSWRQ2_REG_SRC_LAST_ILT_RT_OFFSET 6679 -#define PSWRQ2_REG_CDUC_FIRST_ILT_RT_OFFSET 6680 -#define PSWRQ2_REG_CDUC_LAST_ILT_RT_OFFSET 6681 -#define PSWRQ2_REG_CDUT_FIRST_ILT_RT_OFFSET 6682 -#define PSWRQ2_REG_CDUT_LAST_ILT_RT_OFFSET 6683 -#define PSWRQ2_REG_TSDM_FIRST_ILT_RT_OFFSET 6684 -#define PSWRQ2_REG_TSDM_LAST_ILT_RT_OFFSET 6685 -#define PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6686 -#define PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6687 -#define PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6688 -#define PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET 6689 -#define PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET 6690 -#define PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET 6691 -#define PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET 6692 -#define PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET 6693 -#define PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET 6694 -#define PSWRQ2_REG_VF_BASE_RT_OFFSET 6695 -#define PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET 6696 -#define PSWRQ2_REG_WR_MBS0_RT_OFFSET 6697 -#define PSWRQ2_REG_RD_MBS0_RT_OFFSET 6698 -#define PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET 6699 -#define PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET 6700 -#define PSWRQ2_REG_ILT_MEMORY_RT_OFFSET 6701 +#define SRC_REG_COUNTFREE_RT_OFFSET 6669 +#define SRC_REG_NUMBER_HASH_BITS_RT_OFFSET 6670 +#define PSWRQ2_REG_CDUT_P_SIZE_RT_OFFSET 6671 +#define PSWRQ2_REG_CDUC_P_SIZE_RT_OFFSET 6672 +#define PSWRQ2_REG_TM_P_SIZE_RT_OFFSET 6673 +#define PSWRQ2_REG_QM_P_SIZE_RT_OFFSET 6674 +#define PSWRQ2_REG_SRC_P_SIZE_RT_OFFSET 6675 +#define PSWRQ2_REG_TM_FIRST_ILT_RT_OFFSET 6676 +#define PSWRQ2_REG_TM_LAST_ILT_RT_OFFSET 6677 +#define PSWRQ2_REG_QM_FIRST_ILT_RT_OFFSET 6678 +#define PSWRQ2_REG_QM_LAST_ILT_RT_OFFSET 6679 +#define PSWRQ2_REG_SRC_FIRST_ILT_RT_OFFSET 6680 +#define PSWRQ2_REG_SRC_LAST_ILT_RT_OFFSET 6681 +#define PSWRQ2_REG_CDUC_FIRST_ILT_RT_OFFSET 6682 +#define PSWRQ2_REG_CDUC_LAST_ILT_RT_OFFSET 6683 +#define PSWRQ2_REG_CDUT_FIRST_ILT_RT_OFFSET 6684 +#define PSWRQ2_REG_CDUT_LAST_ILT_RT_OFFSET 6685 +#define PSWRQ2_REG_TSDM_FIRST_ILT_RT_OFFSET 6686 +#define PSWRQ2_REG_TSDM_LAST_ILT_RT_OFFSET 6687 +#define PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6688 +#define PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6689 +#define PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6690 +#define PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET 6691 +#define PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET 6692 +#define PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET 6693 +#define PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET 6694 +#define PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET 6695 +#define PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET 6696 +#define PSWRQ2_REG_VF_BASE_RT_OFFSET 6697 +#define PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET 6698 +#define 
PSWRQ2_REG_WR_MBS0_RT_OFFSET 6699 +#define PSWRQ2_REG_RD_MBS0_RT_OFFSET 6700 +#define PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET 6701 +#define PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET 6702 +#define PSWRQ2_REG_ILT_MEMORY_RT_OFFSET 6703 #define PSWRQ2_REG_ILT_MEMORY_RT_SIZE 22000 -#define PGLUE_REG_B_VF_BASE_RT_OFFSET 28701 -#define PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET 28702 -#define PGLUE_REG_B_PF_BAR0_SIZE_RT_OFFSET 28703 -#define PGLUE_REG_B_PF_BAR1_SIZE_RT_OFFSET 28704 -#define PGLUE_REG_B_VF_BAR1_SIZE_RT_OFFSET 28705 -#define TM_REG_VF_ENABLE_CONN_RT_OFFSET 28706 -#define TM_REG_PF_ENABLE_CONN_RT_OFFSET 28707 -#define TM_REG_PF_ENABLE_TASK_RT_OFFSET 28708 -#define TM_REG_GROUP_SIZE_RESOLUTION_CONN_RT_OFFSET 28709 -#define TM_REG_GROUP_SIZE_RESOLUTION_TASK_RT_OFFSET 28710 -#define TM_REG_CONFIG_CONN_MEM_RT_OFFSET 28711 +#define PGLUE_REG_B_VF_BASE_RT_OFFSET 28703 +#define PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET 28704 +#define PGLUE_REG_B_PF_BAR0_SIZE_RT_OFFSET 28705 +#define PGLUE_REG_B_PF_BAR1_SIZE_RT_OFFSET 28706 +#define PGLUE_REG_B_VF_BAR1_SIZE_RT_OFFSET 28707 +#define TM_REG_VF_ENABLE_CONN_RT_OFFSET 28708 +#define TM_REG_PF_ENABLE_CONN_RT_OFFSET 28709 +#define TM_REG_PF_ENABLE_TASK_RT_OFFSET 28710 +#define TM_REG_GROUP_SIZE_RESOLUTION_CONN_RT_OFFSET 28711 +#define TM_REG_GROUP_SIZE_RESOLUTION_TASK_RT_OFFSET 28712 +#define TM_REG_CONFIG_CONN_MEM_RT_OFFSET 28713 #define TM_REG_CONFIG_CONN_MEM_RT_SIZE 416 -#define TM_REG_CONFIG_TASK_MEM_RT_OFFSET 29127 +#define TM_REG_CONFIG_TASK_MEM_RT_OFFSET 29129 #define TM_REG_CONFIG_TASK_MEM_RT_SIZE 512 -#define QM_REG_MAXPQSIZE_0_RT_OFFSET 29639 -#define QM_REG_MAXPQSIZE_1_RT_OFFSET 29640 -#define QM_REG_MAXPQSIZE_2_RT_OFFSET 29641 -#define QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET 29642 -#define QM_REG_MAXPQSIZETXSEL_1_RT_OFFSET 29643 -#define QM_REG_MAXPQSIZETXSEL_2_RT_OFFSET 29644 -#define QM_REG_MAXPQSIZETXSEL_3_RT_OFFSET 29645 -#define QM_REG_MAXPQSIZETXSEL_4_RT_OFFSET 29646 -#define QM_REG_MAXPQSIZETXSEL_5_RT_OFFSET 29647 -#define QM_REG_MAXPQSIZETXSEL_6_RT_OFFSET 29648 -#define QM_REG_MAXPQSIZETXSEL_7_RT_OFFSET 29649 -#define QM_REG_MAXPQSIZETXSEL_8_RT_OFFSET 29650 -#define QM_REG_MAXPQSIZETXSEL_9_RT_OFFSET 29651 -#define QM_REG_MAXPQSIZETXSEL_10_RT_OFFSET 29652 -#define QM_REG_MAXPQSIZETXSEL_11_RT_OFFSET 29653 -#define QM_REG_MAXPQSIZETXSEL_12_RT_OFFSET 29654 -#define QM_REG_MAXPQSIZETXSEL_13_RT_OFFSET 29655 -#define QM_REG_MAXPQSIZETXSEL_14_RT_OFFSET 29656 -#define QM_REG_MAXPQSIZETXSEL_15_RT_OFFSET 29657 -#define QM_REG_MAXPQSIZETXSEL_16_RT_OFFSET 29658 -#define QM_REG_MAXPQSIZETXSEL_17_RT_OFFSET 29659 -#define QM_REG_MAXPQSIZETXSEL_18_RT_OFFSET 29660 -#define QM_REG_MAXPQSIZETXSEL_19_RT_OFFSET 29661 -#define QM_REG_MAXPQSIZETXSEL_20_RT_OFFSET 29662 -#define QM_REG_MAXPQSIZETXSEL_21_RT_OFFSET 29663 -#define QM_REG_MAXPQSIZETXSEL_22_RT_OFFSET 29664 -#define QM_REG_MAXPQSIZETXSEL_23_RT_OFFSET 29665 -#define QM_REG_MAXPQSIZETXSEL_24_RT_OFFSET 29666 -#define QM_REG_MAXPQSIZETXSEL_25_RT_OFFSET 29667 -#define QM_REG_MAXPQSIZETXSEL_26_RT_OFFSET 29668 -#define QM_REG_MAXPQSIZETXSEL_27_RT_OFFSET 29669 -#define QM_REG_MAXPQSIZETXSEL_28_RT_OFFSET 29670 -#define QM_REG_MAXPQSIZETXSEL_29_RT_OFFSET 29671 -#define QM_REG_MAXPQSIZETXSEL_30_RT_OFFSET 29672 -#define QM_REG_MAXPQSIZETXSEL_31_RT_OFFSET 29673 -#define QM_REG_MAXPQSIZETXSEL_32_RT_OFFSET 29674 -#define QM_REG_MAXPQSIZETXSEL_33_RT_OFFSET 29675 -#define QM_REG_MAXPQSIZETXSEL_34_RT_OFFSET 29676 -#define QM_REG_MAXPQSIZETXSEL_35_RT_OFFSET 29677 -#define QM_REG_MAXPQSIZETXSEL_36_RT_OFFSET 29678 -#define 
QM_REG_MAXPQSIZETXSEL_37_RT_OFFSET 29679
-#define QM_REG_MAXPQSIZETXSEL_38_RT_OFFSET 29680
-#define QM_REG_MAXPQSIZETXSEL_39_RT_OFFSET 29681
-#define QM_REG_MAXPQSIZETXSEL_40_RT_OFFSET 29682
-#define QM_REG_MAXPQSIZETXSEL_41_RT_OFFSET 29683
-#define QM_REG_MAXPQSIZETXSEL_42_RT_OFFSET 29684
-#define QM_REG_MAXPQSIZETXSEL_43_RT_OFFSET 29685
-#define QM_REG_MAXPQSIZETXSEL_44_RT_OFFSET 29686
-#define QM_REG_MAXPQSIZETXSEL_45_RT_OFFSET 29687
-#define QM_REG_MAXPQSIZETXSEL_46_RT_OFFSET 29688
-#define QM_REG_MAXPQSIZETXSEL_47_RT_OFFSET 29689
-#define QM_REG_MAXPQSIZETXSEL_48_RT_OFFSET 29690
-#define QM_REG_MAXPQSIZETXSEL_49_RT_OFFSET 29691
-#define QM_REG_MAXPQSIZETXSEL_50_RT_OFFSET 29692
-#define QM_REG_MAXPQSIZETXSEL_51_RT_OFFSET 29693
-#define QM_REG_MAXPQSIZETXSEL_52_RT_OFFSET 29694
-#define QM_REG_MAXPQSIZETXSEL_53_RT_OFFSET 29695
-#define QM_REG_MAXPQSIZETXSEL_54_RT_OFFSET 29696
-#define QM_REG_MAXPQSIZETXSEL_55_RT_OFFSET 29697
-#define QM_REG_MAXPQSIZETXSEL_56_RT_OFFSET 29698
-#define QM_REG_MAXPQSIZETXSEL_57_RT_OFFSET 29699
-#define QM_REG_MAXPQSIZETXSEL_58_RT_OFFSET 29700
-#define QM_REG_MAXPQSIZETXSEL_59_RT_OFFSET 29701
-#define QM_REG_MAXPQSIZETXSEL_60_RT_OFFSET 29702
-#define QM_REG_MAXPQSIZETXSEL_61_RT_OFFSET 29703
-#define QM_REG_MAXPQSIZETXSEL_62_RT_OFFSET 29704
-#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET 29705
-#define QM_REG_BASEADDROTHERPQ_RT_OFFSET 29706
+#define QM_REG_MAXPQSIZE_0_RT_OFFSET 29641
+#define QM_REG_MAXPQSIZE_1_RT_OFFSET 29642
+#define QM_REG_MAXPQSIZE_2_RT_OFFSET 29643
+#define QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET 29644
+#define QM_REG_MAXPQSIZETXSEL_1_RT_OFFSET 29645
+#define QM_REG_MAXPQSIZETXSEL_2_RT_OFFSET 29646
+#define QM_REG_MAXPQSIZETXSEL_3_RT_OFFSET 29647
+#define QM_REG_MAXPQSIZETXSEL_4_RT_OFFSET 29648
+#define QM_REG_MAXPQSIZETXSEL_5_RT_OFFSET 29649
+#define QM_REG_MAXPQSIZETXSEL_6_RT_OFFSET 29650
+#define QM_REG_MAXPQSIZETXSEL_7_RT_OFFSET 29651
+#define QM_REG_MAXPQSIZETXSEL_8_RT_OFFSET 29652
+#define QM_REG_MAXPQSIZETXSEL_9_RT_OFFSET 29653
+#define QM_REG_MAXPQSIZETXSEL_10_RT_OFFSET 29654
+#define QM_REG_MAXPQSIZETXSEL_11_RT_OFFSET 29655
+#define QM_REG_MAXPQSIZETXSEL_12_RT_OFFSET 29656
+#define QM_REG_MAXPQSIZETXSEL_13_RT_OFFSET 29657
+#define QM_REG_MAXPQSIZETXSEL_14_RT_OFFSET 29658
+#define QM_REG_MAXPQSIZETXSEL_15_RT_OFFSET 29659
+#define QM_REG_MAXPQSIZETXSEL_16_RT_OFFSET 29660
+#define QM_REG_MAXPQSIZETXSEL_17_RT_OFFSET 29661
+#define QM_REG_MAXPQSIZETXSEL_18_RT_OFFSET 29662
+#define QM_REG_MAXPQSIZETXSEL_19_RT_OFFSET 29663
+#define QM_REG_MAXPQSIZETXSEL_20_RT_OFFSET 29664
+#define QM_REG_MAXPQSIZETXSEL_21_RT_OFFSET 29665
+#define QM_REG_MAXPQSIZETXSEL_22_RT_OFFSET 29666
+#define QM_REG_MAXPQSIZETXSEL_23_RT_OFFSET 29667
+#define QM_REG_MAXPQSIZETXSEL_24_RT_OFFSET 29668
+#define QM_REG_MAXPQSIZETXSEL_25_RT_OFFSET 29669
+#define QM_REG_MAXPQSIZETXSEL_26_RT_OFFSET 29670
+#define QM_REG_MAXPQSIZETXSEL_27_RT_OFFSET 29671
+#define QM_REG_MAXPQSIZETXSEL_28_RT_OFFSET 29672
+#define QM_REG_MAXPQSIZETXSEL_29_RT_OFFSET 29673
+#define QM_REG_MAXPQSIZETXSEL_30_RT_OFFSET 29674
+#define QM_REG_MAXPQSIZETXSEL_31_RT_OFFSET 29675
+#define QM_REG_MAXPQSIZETXSEL_32_RT_OFFSET 29676
+#define QM_REG_MAXPQSIZETXSEL_33_RT_OFFSET 29677
+#define QM_REG_MAXPQSIZETXSEL_34_RT_OFFSET 29678
+#define QM_REG_MAXPQSIZETXSEL_35_RT_OFFSET 29679
+#define QM_REG_MAXPQSIZETXSEL_36_RT_OFFSET 29680
+#define QM_REG_MAXPQSIZETXSEL_37_RT_OFFSET 29681
+#define QM_REG_MAXPQSIZETXSEL_38_RT_OFFSET 29682
+#define QM_REG_MAXPQSIZETXSEL_39_RT_OFFSET 29683
+#define QM_REG_MAXPQSIZETXSEL_40_RT_OFFSET 29684
+#define QM_REG_MAXPQSIZETXSEL_41_RT_OFFSET 29685
+#define QM_REG_MAXPQSIZETXSEL_42_RT_OFFSET 29686
+#define QM_REG_MAXPQSIZETXSEL_43_RT_OFFSET 29687
+#define QM_REG_MAXPQSIZETXSEL_44_RT_OFFSET 29688
+#define QM_REG_MAXPQSIZETXSEL_45_RT_OFFSET 29689
+#define QM_REG_MAXPQSIZETXSEL_46_RT_OFFSET 29690
+#define QM_REG_MAXPQSIZETXSEL_47_RT_OFFSET 29691
+#define QM_REG_MAXPQSIZETXSEL_48_RT_OFFSET 29692
+#define QM_REG_MAXPQSIZETXSEL_49_RT_OFFSET 29693
+#define QM_REG_MAXPQSIZETXSEL_50_RT_OFFSET 29694
+#define QM_REG_MAXPQSIZETXSEL_51_RT_OFFSET 29695
+#define QM_REG_MAXPQSIZETXSEL_52_RT_OFFSET 29696
+#define QM_REG_MAXPQSIZETXSEL_53_RT_OFFSET 29697
+#define QM_REG_MAXPQSIZETXSEL_54_RT_OFFSET 29698
+#define QM_REG_MAXPQSIZETXSEL_55_RT_OFFSET 29699
+#define QM_REG_MAXPQSIZETXSEL_56_RT_OFFSET 29700
+#define QM_REG_MAXPQSIZETXSEL_57_RT_OFFSET 29701
+#define QM_REG_MAXPQSIZETXSEL_58_RT_OFFSET 29702
+#define QM_REG_MAXPQSIZETXSEL_59_RT_OFFSET 29703
+#define QM_REG_MAXPQSIZETXSEL_60_RT_OFFSET 29704
+#define QM_REG_MAXPQSIZETXSEL_61_RT_OFFSET 29705
+#define QM_REG_MAXPQSIZETXSEL_62_RT_OFFSET 29706
+#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET 29707
+#define QM_REG_BASEADDROTHERPQ_RT_OFFSET 29708
 #define QM_REG_BASEADDROTHERPQ_RT_SIZE 128
-#define QM_REG_VOQCRDLINE_RT_OFFSET 29834
+#define QM_REG_VOQCRDLINE_RT_OFFSET 29836
 #define QM_REG_VOQCRDLINE_RT_SIZE 20
-#define QM_REG_VOQINITCRDLINE_RT_OFFSET 29854
+#define QM_REG_VOQINITCRDLINE_RT_OFFSET 29856
 #define QM_REG_VOQINITCRDLINE_RT_SIZE 20
-#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET 29874
-#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET 29875
-#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET 29876
-#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET 29877
-#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET 29878
-#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET 29879
-#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET 29880
-#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET 29881
-#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET 29882
-#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET 29883
-#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET 29884
-#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET 29885
-#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET 29886
-#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET 29887
-#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET 29888
-#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET 29889
-#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET 29890
-#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET 29891
-#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET 29892
-#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET 29893
-#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET 29894
-#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET 29895
-#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET 29896
-#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET 29897
-#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET 29898
-#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET 29899
-#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET 29900
-#define QM_REG_PQTX2PF_0_RT_OFFSET 29901
-#define QM_REG_PQTX2PF_1_RT_OFFSET 29902
-#define QM_REG_PQTX2PF_2_RT_OFFSET 29903
-#define QM_REG_PQTX2PF_3_RT_OFFSET 29904
-#define QM_REG_PQTX2PF_4_RT_OFFSET 29905
-#define QM_REG_PQTX2PF_5_RT_OFFSET 29906
-#define QM_REG_PQTX2PF_6_RT_OFFSET 29907
-#define QM_REG_PQTX2PF_7_RT_OFFSET 29908
-#define QM_REG_PQTX2PF_8_RT_OFFSET 29909
-#define QM_REG_PQTX2PF_9_RT_OFFSET 29910
-#define QM_REG_PQTX2PF_10_RT_OFFSET 29911
-#define QM_REG_PQTX2PF_11_RT_OFFSET 29912
-#define QM_REG_PQTX2PF_12_RT_OFFSET 29913
-#define QM_REG_PQTX2PF_13_RT_OFFSET 29914
-#define QM_REG_PQTX2PF_14_RT_OFFSET 29915
-#define QM_REG_PQTX2PF_15_RT_OFFSET 29916
-#define QM_REG_PQTX2PF_16_RT_OFFSET 29917
-#define QM_REG_PQTX2PF_17_RT_OFFSET 29918
-#define QM_REG_PQTX2PF_18_RT_OFFSET 29919
-#define QM_REG_PQTX2PF_19_RT_OFFSET 29920
-#define QM_REG_PQTX2PF_20_RT_OFFSET 29921
-#define QM_REG_PQTX2PF_21_RT_OFFSET 29922
-#define QM_REG_PQTX2PF_22_RT_OFFSET 29923
-#define QM_REG_PQTX2PF_23_RT_OFFSET 29924
-#define QM_REG_PQTX2PF_24_RT_OFFSET 29925
-#define QM_REG_PQTX2PF_25_RT_OFFSET 29926
-#define QM_REG_PQTX2PF_26_RT_OFFSET 29927
-#define QM_REG_PQTX2PF_27_RT_OFFSET 29928
-#define QM_REG_PQTX2PF_28_RT_OFFSET 29929
-#define QM_REG_PQTX2PF_29_RT_OFFSET 29930
-#define QM_REG_PQTX2PF_30_RT_OFFSET 29931
-#define QM_REG_PQTX2PF_31_RT_OFFSET 29932
-#define QM_REG_PQTX2PF_32_RT_OFFSET 29933
-#define QM_REG_PQTX2PF_33_RT_OFFSET 29934
-#define QM_REG_PQTX2PF_34_RT_OFFSET 29935
-#define QM_REG_PQTX2PF_35_RT_OFFSET 29936
-#define QM_REG_PQTX2PF_36_RT_OFFSET 29937
-#define QM_REG_PQTX2PF_37_RT_OFFSET 29938
-#define QM_REG_PQTX2PF_38_RT_OFFSET 29939
-#define QM_REG_PQTX2PF_39_RT_OFFSET 29940
-#define QM_REG_PQTX2PF_40_RT_OFFSET 29941
-#define QM_REG_PQTX2PF_41_RT_OFFSET 29942
-#define QM_REG_PQTX2PF_42_RT_OFFSET 29943
-#define QM_REG_PQTX2PF_43_RT_OFFSET 29944
-#define QM_REG_PQTX2PF_44_RT_OFFSET 29945
-#define QM_REG_PQTX2PF_45_RT_OFFSET 29946
-#define QM_REG_PQTX2PF_46_RT_OFFSET 29947
-#define QM_REG_PQTX2PF_47_RT_OFFSET 29948
-#define QM_REG_PQTX2PF_48_RT_OFFSET 29949
-#define QM_REG_PQTX2PF_49_RT_OFFSET 29950
-#define QM_REG_PQTX2PF_50_RT_OFFSET 29951
-#define QM_REG_PQTX2PF_51_RT_OFFSET 29952
-#define QM_REG_PQTX2PF_52_RT_OFFSET 29953
-#define QM_REG_PQTX2PF_53_RT_OFFSET 29954
-#define QM_REG_PQTX2PF_54_RT_OFFSET 29955
-#define QM_REG_PQTX2PF_55_RT_OFFSET 29956
-#define QM_REG_PQTX2PF_56_RT_OFFSET 29957
-#define QM_REG_PQTX2PF_57_RT_OFFSET 29958
-#define QM_REG_PQTX2PF_58_RT_OFFSET 29959
-#define QM_REG_PQTX2PF_59_RT_OFFSET 29960
-#define QM_REG_PQTX2PF_60_RT_OFFSET 29961
-#define QM_REG_PQTX2PF_61_RT_OFFSET 29962
-#define QM_REG_PQTX2PF_62_RT_OFFSET 29963
-#define QM_REG_PQTX2PF_63_RT_OFFSET 29964
-#define QM_REG_PQOTHER2PF_0_RT_OFFSET 29965
-#define QM_REG_PQOTHER2PF_1_RT_OFFSET 29966
-#define QM_REG_PQOTHER2PF_2_RT_OFFSET 29967
-#define QM_REG_PQOTHER2PF_3_RT_OFFSET 29968
-#define QM_REG_PQOTHER2PF_4_RT_OFFSET 29969
-#define QM_REG_PQOTHER2PF_5_RT_OFFSET 29970
-#define QM_REG_PQOTHER2PF_6_RT_OFFSET 29971
-#define QM_REG_PQOTHER2PF_7_RT_OFFSET 29972
-#define QM_REG_PQOTHER2PF_8_RT_OFFSET 29973
-#define QM_REG_PQOTHER2PF_9_RT_OFFSET 29974
-#define QM_REG_PQOTHER2PF_10_RT_OFFSET 29975
-#define QM_REG_PQOTHER2PF_11_RT_OFFSET 29976
-#define QM_REG_PQOTHER2PF_12_RT_OFFSET 29977
-#define QM_REG_PQOTHER2PF_13_RT_OFFSET 29978
-#define QM_REG_PQOTHER2PF_14_RT_OFFSET 29979
-#define QM_REG_PQOTHER2PF_15_RT_OFFSET 29980
-#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET 29981
-#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET 29982
-#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET 29983
-#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET 29984
-#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET 29985
-#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET 29986
-#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET 29987
-#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET 29988
-#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET 29989
-#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET 29990
-#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET 29991
-#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET 29992
-#define QM_REG_RLGLBLINCVAL_RT_OFFSET 29993
+#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET 29876
+#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET 29877
+#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET 29878
+#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET 29879
+#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET 29880
+#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET 29881
+#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET 29882
+#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET 29883
+#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET 29884
+#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET 29885
+#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET 29886
+#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET 29887
+#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET 29888
+#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET 29889
+#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET 29890
+#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET 29891
+#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET 29892
+#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET 29893
+#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET 29894
+#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET 29895
+#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET 29896
+#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET 29897
+#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET 29898
+#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET 29899
+#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET 29900
+#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET 29901
+#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET 29902
+#define QM_REG_PQTX2PF_0_RT_OFFSET 29903
+#define QM_REG_PQTX2PF_1_RT_OFFSET 29904
+#define QM_REG_PQTX2PF_2_RT_OFFSET 29905
+#define QM_REG_PQTX2PF_3_RT_OFFSET 29906
+#define QM_REG_PQTX2PF_4_RT_OFFSET 29907
+#define QM_REG_PQTX2PF_5_RT_OFFSET 29908
+#define QM_REG_PQTX2PF_6_RT_OFFSET 29909
+#define QM_REG_PQTX2PF_7_RT_OFFSET 29910
+#define QM_REG_PQTX2PF_8_RT_OFFSET 29911
+#define QM_REG_PQTX2PF_9_RT_OFFSET 29912
+#define QM_REG_PQTX2PF_10_RT_OFFSET 29913
+#define QM_REG_PQTX2PF_11_RT_OFFSET 29914
+#define QM_REG_PQTX2PF_12_RT_OFFSET 29915
+#define QM_REG_PQTX2PF_13_RT_OFFSET 29916
+#define QM_REG_PQTX2PF_14_RT_OFFSET 29917
+#define QM_REG_PQTX2PF_15_RT_OFFSET 29918
+#define QM_REG_PQTX2PF_16_RT_OFFSET 29919
+#define QM_REG_PQTX2PF_17_RT_OFFSET 29920
+#define QM_REG_PQTX2PF_18_RT_OFFSET 29921
+#define QM_REG_PQTX2PF_19_RT_OFFSET 29922
+#define QM_REG_PQTX2PF_20_RT_OFFSET 29923
+#define QM_REG_PQTX2PF_21_RT_OFFSET 29924
+#define QM_REG_PQTX2PF_22_RT_OFFSET 29925
+#define QM_REG_PQTX2PF_23_RT_OFFSET 29926
+#define QM_REG_PQTX2PF_24_RT_OFFSET 29927
+#define QM_REG_PQTX2PF_25_RT_OFFSET 29928
+#define QM_REG_PQTX2PF_26_RT_OFFSET 29929
+#define QM_REG_PQTX2PF_27_RT_OFFSET 29930
+#define QM_REG_PQTX2PF_28_RT_OFFSET 29931
+#define QM_REG_PQTX2PF_29_RT_OFFSET 29932
+#define QM_REG_PQTX2PF_30_RT_OFFSET 29933
+#define QM_REG_PQTX2PF_31_RT_OFFSET 29934
+#define QM_REG_PQTX2PF_32_RT_OFFSET 29935
+#define QM_REG_PQTX2PF_33_RT_OFFSET 29936
+#define QM_REG_PQTX2PF_34_RT_OFFSET 29937
+#define QM_REG_PQTX2PF_35_RT_OFFSET 29938
+#define QM_REG_PQTX2PF_36_RT_OFFSET 29939
+#define QM_REG_PQTX2PF_37_RT_OFFSET 29940
+#define QM_REG_PQTX2PF_38_RT_OFFSET 29941
+#define QM_REG_PQTX2PF_39_RT_OFFSET 29942
+#define QM_REG_PQTX2PF_40_RT_OFFSET 29943
+#define QM_REG_PQTX2PF_41_RT_OFFSET 29944
+#define QM_REG_PQTX2PF_42_RT_OFFSET 29945
+#define QM_REG_PQTX2PF_43_RT_OFFSET 29946
+#define QM_REG_PQTX2PF_44_RT_OFFSET 29947
+#define QM_REG_PQTX2PF_45_RT_OFFSET 29948
+#define QM_REG_PQTX2PF_46_RT_OFFSET 29949
+#define QM_REG_PQTX2PF_47_RT_OFFSET 29950
+#define QM_REG_PQTX2PF_48_RT_OFFSET 29951
+#define QM_REG_PQTX2PF_49_RT_OFFSET 29952
+#define QM_REG_PQTX2PF_50_RT_OFFSET 29953
+#define QM_REG_PQTX2PF_51_RT_OFFSET 29954
+#define QM_REG_PQTX2PF_52_RT_OFFSET 29955
+#define QM_REG_PQTX2PF_53_RT_OFFSET 29956
+#define QM_REG_PQTX2PF_54_RT_OFFSET 29957
+#define QM_REG_PQTX2PF_55_RT_OFFSET 29958
+#define QM_REG_PQTX2PF_56_RT_OFFSET 29959
+#define QM_REG_PQTX2PF_57_RT_OFFSET 29960
+#define QM_REG_PQTX2PF_58_RT_OFFSET 29961
+#define QM_REG_PQTX2PF_59_RT_OFFSET 29962
+#define QM_REG_PQTX2PF_60_RT_OFFSET 29963
+#define QM_REG_PQTX2PF_61_RT_OFFSET 29964
+#define QM_REG_PQTX2PF_62_RT_OFFSET 29965
+#define QM_REG_PQTX2PF_63_RT_OFFSET 29966
+#define QM_REG_PQOTHER2PF_0_RT_OFFSET 29967
+#define QM_REG_PQOTHER2PF_1_RT_OFFSET 29968
+#define QM_REG_PQOTHER2PF_2_RT_OFFSET 29969
+#define QM_REG_PQOTHER2PF_3_RT_OFFSET 29970
+#define QM_REG_PQOTHER2PF_4_RT_OFFSET 29971
+#define QM_REG_PQOTHER2PF_5_RT_OFFSET 29972
+#define QM_REG_PQOTHER2PF_6_RT_OFFSET 29973
+#define QM_REG_PQOTHER2PF_7_RT_OFFSET 29974
+#define QM_REG_PQOTHER2PF_8_RT_OFFSET 29975
+#define QM_REG_PQOTHER2PF_9_RT_OFFSET 29976
+#define QM_REG_PQOTHER2PF_10_RT_OFFSET 29977
+#define QM_REG_PQOTHER2PF_11_RT_OFFSET 29978
+#define QM_REG_PQOTHER2PF_12_RT_OFFSET 29979
+#define QM_REG_PQOTHER2PF_13_RT_OFFSET 29980
+#define QM_REG_PQOTHER2PF_14_RT_OFFSET 29981
+#define QM_REG_PQOTHER2PF_15_RT_OFFSET 29982
+#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET 29983
+#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET 29984
+#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET 29985
+#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET 29986
+#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET 29987
+#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET 29988
+#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET 29989
+#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET 29990
+#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET 29991
+#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET 29992
+#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET 29993
+#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET 29994
+#define QM_REG_RLGLBLINCVAL_RT_OFFSET 29995
 #define QM_REG_RLGLBLINCVAL_RT_SIZE 256
-#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET 30249
+#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET 30251
 #define QM_REG_RLGLBLUPPERBOUND_RT_SIZE 256
-#define QM_REG_RLGLBLCRD_RT_OFFSET 30505
+#define QM_REG_RLGLBLCRD_RT_OFFSET 30507
 #define QM_REG_RLGLBLCRD_RT_SIZE 256
-#define QM_REG_RLGLBLENABLE_RT_OFFSET 30761
-#define QM_REG_RLPFPERIOD_RT_OFFSET 30762
-#define QM_REG_RLPFPERIODTIMER_RT_OFFSET 30763
-#define QM_REG_RLPFINCVAL_RT_OFFSET 30764
+#define QM_REG_RLGLBLENABLE_RT_OFFSET 30763
+#define QM_REG_RLPFPERIOD_RT_OFFSET 30764
+#define QM_REG_RLPFPERIODTIMER_RT_OFFSET 30765
+#define QM_REG_RLPFINCVAL_RT_OFFSET 30766
 #define QM_REG_RLPFINCVAL_RT_SIZE 16
-#define QM_REG_RLPFUPPERBOUND_RT_OFFSET 30780
+#define QM_REG_RLPFUPPERBOUND_RT_OFFSET 30782
 #define QM_REG_RLPFUPPERBOUND_RT_SIZE 16
-#define QM_REG_RLPFCRD_RT_OFFSET 30796
+#define QM_REG_RLPFCRD_RT_OFFSET 30798
 #define QM_REG_RLPFCRD_RT_SIZE 16
-#define QM_REG_RLPFENABLE_RT_OFFSET 30812
-#define QM_REG_RLPFVOQENABLE_RT_OFFSET 30813
-#define QM_REG_WFQPFWEIGHT_RT_OFFSET 30814
+#define QM_REG_RLPFENABLE_RT_OFFSET 30814
+#define QM_REG_RLPFVOQENABLE_RT_OFFSET 30815
+#define QM_REG_WFQPFWEIGHT_RT_OFFSET 30816
 #define QM_REG_WFQPFWEIGHT_RT_SIZE 16
-#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET 30830
+#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET 30832
 #define QM_REG_WFQPFUPPERBOUND_RT_SIZE 16
-#define QM_REG_WFQPFCRD_RT_OFFSET 30846
+#define QM_REG_WFQPFCRD_RT_OFFSET 30848
 #define QM_REG_WFQPFCRD_RT_SIZE 160
-#define QM_REG_WFQPFENABLE_RT_OFFSET 31006
-#define QM_REG_WFQVPENABLE_RT_OFFSET 31007
-#define QM_REG_BASEADDRTXPQ_RT_OFFSET 31008
+#define QM_REG_WFQPFENABLE_RT_OFFSET 31008
+#define QM_REG_WFQVPENABLE_RT_OFFSET 31009
+#define QM_REG_BASEADDRTXPQ_RT_OFFSET 31010
 #define QM_REG_BASEADDRTXPQ_RT_SIZE 512
-#define QM_REG_TXPQMAP_RT_OFFSET 31520
+#define QM_REG_TXPQMAP_RT_OFFSET 31522
 #define QM_REG_TXPQMAP_RT_SIZE 512
-#define QM_REG_WFQVPWEIGHT_RT_OFFSET 32032
+#define QM_REG_WFQVPWEIGHT_RT_OFFSET 32034
 #define QM_REG_WFQVPWEIGHT_RT_SIZE 512
-#define QM_REG_WFQVPUPPERBOUND_RT_OFFSET 32544
-#define QM_REG_WFQVPUPPERBOUND_RT_SIZE 512
-#define QM_REG_WFQVPCRD_RT_OFFSET 33056
+#define QM_REG_WFQVPCRD_RT_OFFSET 32546
 #define QM_REG_WFQVPCRD_RT_SIZE 512
-#define QM_REG_WFQVPMAP_RT_OFFSET 33568
+#define QM_REG_WFQVPMAP_RT_OFFSET 33058
 #define QM_REG_WFQVPMAP_RT_SIZE 512
-#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 34080
+#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 33570
 #define QM_REG_WFQPFCRD_MSB_RT_SIZE 160
-#define NIG_REG_LLH_CLS_TYPE_DUALMODE_RT_OFFSET 34240
-#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 34241
-#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 34242
-#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 34243
-#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 34244
-#define NIG_REG_OUTER_TAG_VALUE_MASK_RT_OFFSET 34245
-#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 34246
-#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 34247
+#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 33730
+#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 33731
+#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 33732
+#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 33733
+#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 33734
+#define NIG_REG_OUTER_TAG_VALUE_MASK_RT_OFFSET 33735
+#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 33736
+#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 33737
 #define NIG_REG_LLH_FUNC_TAG_EN_RT_SIZE 4
-#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_OFFSET 34251
+#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_OFFSET 33741
 #define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_SIZE 4
-#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 34255
+#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 33745
 #define NIG_REG_LLH_FUNC_TAG_VALUE_RT_SIZE 4
-#define NIG_REG_LLH_FUNC_NO_TAG_RT_OFFSET 34259
-#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 34260
+#define NIG_REG_LLH_FUNC_NO_TAG_RT_OFFSET 33749
+#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 33750
 #define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_SIZE 32
-#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 34292
+#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 33782
 #define NIG_REG_LLH_FUNC_FILTER_EN_RT_SIZE 16
-#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 34308
+#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 33798
 #define NIG_REG_LLH_FUNC_FILTER_MODE_RT_SIZE 16
-#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 34324
+#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 33814
 #define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 16
-#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 34340
+#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 33830
 #define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE 16
-#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 34356
-#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 34357
-#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 34358
-#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 34359
-#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 34360
-#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 34361
-#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 34362
-#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 34363
-#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 34364
-#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 34365
-#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 34366
-#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 34367
-#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 34368
-#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 34369
-#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 34370
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 34371
-#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 34372
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 34373
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 34374
-#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 34375
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 34376
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 34377
-#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 34378
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 34379
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 34380
-#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 34381
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 34382
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 34383
-#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 34384
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 34385
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 34386
-#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 34387
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 34388
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 34389
-#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 34390
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 34391
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 34392
-#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 34393
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 34394
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 34395
-#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 34396
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 34397
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 34398
-#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 34399
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 34400
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 34401
-#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 34402
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 34403
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 34404
-#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 34405
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 34406
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 34407
-#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 34408
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 34409
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 34410
-#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 34411
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 34412
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 34413
-#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 34414
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 34415
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 34416
-#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 34417
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 34418
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 34419
-#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 34420
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 34421
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 34422
-#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 34423
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 34424
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 34425
-#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 34426
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 34427
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 34428
-#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 34429
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 34430
-#define XCM_REG_CON_PHY_Q3_RT_OFFSET 34431
-
-#define RUNTIME_ARRAY_SIZE 34432
+#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 33846
+#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 33847
+#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 33848
+#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 33849
+#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 33850
+#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 33851
+#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 33852
+#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 33853
+#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 33854
+#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 33855
+#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 33856
+#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 33857
+#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 33858
+#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 33859
+#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 33860
+#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 33861
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 33862
+#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 33863
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 33864
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 33865
+#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 33866
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 33867
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 33868
+#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 33869
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 33870
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 33871
+#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 33872
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 33873
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 33874
+#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 33875
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 33876
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 33877
+#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 33878
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 33879
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 33880
+#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 33881
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 33882
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 33883
+#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 33884
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 33885
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 33886
+#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 33887
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 33888
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 33889
+#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 33890
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 33891
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 33892
+#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 33893
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 33894
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 33895
+#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 33896
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 33897
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 33898
+#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 33899
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 33900
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 33901
+#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 33902
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 33903
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 33904
+#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 33905
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 33906
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 33907
+#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 33908
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 33909
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 33910
+#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 33911
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 33912
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 33913
+#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 33914
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 33915
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 33916
+#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 33917
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 33918
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 33919
+#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 33920
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 33921
+#define XCM_REG_CON_PHY_Q3_RT_OFFSET 33922
+
+#define RUNTIME_ARRAY_SIZE 33923
 
-/* The eth storm context for the Ystorm */
-struct ystorm_eth_conn_st_ctx {
+/* The eth storm context for the Tstorm */
+struct tstorm_eth_conn_st_ctx {
 	__le32 reserved[4];
 };
 
@@ -2562,14 +2528,226 @@ struct xstorm_eth_conn_ag_ctx {
 	__le16 word15 /* word15 */;
 };
 
-/* The eth storm context for the Tstorm */
-struct tstorm_eth_conn_st_ctx {
-	__le32 reserved[4];
+/* The eth storm context for the Ystorm */
+struct ystorm_eth_conn_st_ctx {
+	__le32 reserved[8];
 };
 
-/* The eth storm context for the Mstorm */
-struct mstorm_eth_conn_st_ctx {
-	__le32 reserved[8];
+struct ystorm_eth_conn_ag_ctx {
+	u8 byte0 /* cdu_validation */;
+	u8 byte1 /* state */;
+	u8 flags0;
+#define YSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
+#define YSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
+#define YSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
+#define YSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
+#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3 /* cf0 */
+#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 2
+#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_MASK 0x3 /* cf1 */
+#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_SHIFT 4
+#define YSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 /* cf2 */
+#define YSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
+	u8 flags1;
+#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1 /* cf0en */
+#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 0
+#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_MASK 0x1 /* cf1en */
+#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_SHIFT 1
+#define YSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
+#define YSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
+#define YSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
+#define YSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define YSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
+#define YSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define YSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
+#define YSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define YSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
+#define YSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define YSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
+#define YSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7
+	u8 byte2 /* byte2 */;
+	u8 byte3 /* byte3 */;
+	__le16 word0 /* word0 */;
+	__le32 terminate_spqe /* reg0 */;
+	__le32 reg1 /* reg1 */;
+	__le16 tx_bd_cons_upd /* word1 */;
+	__le16 word2 /* word2 */;
+	__le16 word3 /* word3 */;
+	__le16 word4 /* word4 */;
+	__le32 reg2 /* reg2 */;
+	__le32 reg3 /* reg3 */;
+};
+
+struct tstorm_eth_conn_ag_ctx {
+	u8 byte0 /* cdu_validation */;
+	u8 byte1 /* state */;
+	u8 flags0;
+#define TSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
+#define TSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
+#define TSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
+#define TSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
+#define TSTORM_ETH_CONN_AG_CTX_BIT2_MASK 0x1 /* bit2 */
+#define TSTORM_ETH_CONN_AG_CTX_BIT2_SHIFT 2
+#define TSTORM_ETH_CONN_AG_CTX_BIT3_MASK 0x1 /* bit3 */
+#define TSTORM_ETH_CONN_AG_CTX_BIT3_SHIFT 3
+#define TSTORM_ETH_CONN_AG_CTX_BIT4_MASK 0x1 /* bit4 */
+#define TSTORM_ETH_CONN_AG_CTX_BIT4_SHIFT 4
+#define TSTORM_ETH_CONN_AG_CTX_BIT5_MASK 0x1 /* bit5 */
+#define TSTORM_ETH_CONN_AG_CTX_BIT5_SHIFT 5
+#define TSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */
+#define TSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 6
+	u8 flags1;
+#define TSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */
+#define TSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 0
+#define TSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */
+#define TSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 2
+#define TSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */
+#define TSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 4
+#define TSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
+#define TSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 6
+	u8 flags2;
+#define TSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
+#define TSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 0
+#define TSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
+#define TSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 2
+#define TSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */
+#define TSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 4
+#define TSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */
+#define TSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 6
+	u8 flags3;
+#define TSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */
+#define TSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 0
+#define TSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */
+#define TSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 2
+#define TSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
+#define TSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 4
+#define TSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
+#define TSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 5
+#define TSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
+#define TSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 6
+#define TSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */
+#define TSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 7
+	u8 flags4;
+#define TSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */
+#define TSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 0
+#define TSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */
+#define TSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 1
+#define TSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */
+#define TSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 2
+#define TSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1 /* cf7en */
+#define TSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 3
+#define TSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1 /* cf8en */
+#define TSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 4
+#define TSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1 /* cf9en */
+#define TSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 5
+#define TSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1 /* cf10en */
+#define TSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 6
+#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
+#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7
+	u8 flags5;
+#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
+#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
+#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
+#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
+#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
+#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_MASK 0x1 /* rule6en */
+#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_SHIFT 5
+#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
+#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define TSTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */
+#define TSTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7
+	__le32 reg0 /* reg0 */;
+	__le32 reg1 /* reg1 */;
+	__le32 reg2 /* reg2 */;
+	__le32 reg3 /* reg3 */;
+	__le32 reg4 /* reg4 */;
+	__le32 reg5 /* reg5 */;
+	__le32 reg6 /* reg6 */;
+	__le32 reg7 /* reg7 */;
+	__le32 reg8 /* reg8 */;
+	u8 byte2 /* byte2 */;
+	u8 byte3 /* byte3 */;
+	__le16 rx_bd_cons /* word0 */;
+	u8 byte4 /* byte4 */;
+	u8 byte5 /* byte5 */;
+	__le16 rx_bd_prod /* word1 */;
+	__le16 word2 /* conn_dpi */;
+	__le16 word3 /* word3 */;
+	__le32 reg9 /* reg9 */;
+	__le32 reg10 /* reg10 */;
+};
+
+struct ustorm_eth_conn_ag_ctx {
+	u8 byte0 /* cdu_validation */;
+	u8 byte1 /* state */;
+	u8 flags0;
+#define USTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
+#define USTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
+#define USTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
+#define USTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
+#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_MASK 0x3 /* timer0cf */
+#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_SHIFT 2
+#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_MASK 0x3 /* timer1cf */
+#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_SHIFT 4
+#define USTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */
+#define USTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
+	u8 flags1;
+#define USTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */
+#define USTORM_ETH_CONN_AG_CTX_CF3_SHIFT 0
+#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_MASK 0x3 /* cf4 */
+#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_SHIFT 2
+#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_MASK 0x3 /* cf5 */
+#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_SHIFT 4
+#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3 /* cf6 */
+#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 6
+	u8 flags2;
+#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_MASK 0x1 /* cf0en */
+#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_SHIFT 0
+#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_MASK 0x1 /* cf1en */
+#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_SHIFT 1
+#define USTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
+#define USTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
+#define USTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */
+#define USTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 3
+#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_MASK 0x1 /* cf4en */
+#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_SHIFT 4
+#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_MASK 0x1 /* cf5en */
+#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_SHIFT 5
+#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1 /* cf6en */
+#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 6
+#define USTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
+#define USTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7
+	u8 flags3;
+#define USTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
+#define USTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define USTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
+#define USTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define USTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
+#define USTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define USTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
+#define USTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define USTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
+#define USTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define USTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */
+#define USTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define USTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
+#define USTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define USTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */
+#define USTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7
+	u8 byte2 /* byte2 */;
+	u8 byte3 /* byte3 */;
+	__le16 word0 /* conn_dpi */;
+	__le16 tx_bd_cons /* word1 */;
+	__le32 reg0 /* reg0 */;
+	__le32 reg1 /* reg1 */;
+	__le32 reg2 /* reg2 */;
+	__le32 tx_int_coallecing_timeset /* reg3 */;
+	__le16 tx_drv_bd_cons /* word2 */;
+	__le16 rx_drv_cqe_cons /* word3 */;
 };
 
 /* The eth storm context for the Ustorm */
@@ -2577,24 +2755,30 @@ struct ustorm_eth_conn_st_ctx {
 	__le32 reserved[40];
 };
 
+/* The eth storm context for the Mstorm */
+struct mstorm_eth_conn_st_ctx {
+	__le32 reserved[8];
+};
+
 /* eth connection context */
 struct eth_conn_context {
-	struct ystorm_eth_conn_st_ctx ystorm_st_context;
-	struct regpair ystorm_st_padding[2] /* padding */;
+	struct tstorm_eth_conn_st_ctx tstorm_st_context;
+	struct regpair tstorm_st_padding[2];
 	struct pstorm_eth_conn_st_ctx pstorm_st_context;
-	struct regpair pstorm_st_padding[2] /* padding */;
 	struct xstorm_eth_conn_st_ctx xstorm_st_context;
 	struct xstorm_eth_conn_ag_ctx xstorm_ag_context;
-	struct tstorm_eth_conn_st_ctx tstorm_st_context;
-	struct regpair tstorm_st_padding[2] /* padding */;
-	struct mstorm_eth_conn_st_ctx mstorm_st_context;
+	struct ystorm_eth_conn_st_ctx ystorm_st_context;
+	struct ystorm_eth_conn_ag_ctx ystorm_ag_context;
+	struct tstorm_eth_conn_ag_ctx tstorm_ag_context;
+	struct ustorm_eth_conn_ag_ctx ustorm_ag_context;
 	struct ustorm_eth_conn_st_ctx ustorm_st_context;
+	struct mstorm_eth_conn_st_ctx mstorm_st_context;
 };
 
 enum eth_filter_action {
 	ETH_FILTER_ACTION_REMOVE,
 	ETH_FILTER_ACTION_ADD,
-	ETH_FILTER_ACTION_REPLACE,
+	ETH_FILTER_ACTION_REMOVE_ALL,
 	MAX_ETH_FILTER_ACTION
 };
 
@@ -2653,6 +2837,32 @@ enum eth_ramrod_cmd_id {
 	MAX_ETH_RAMROD_CMD_ID
 };
 
+enum eth_tx_err {
+	ETH_TX_ERR_DROP /* Drop erroneous packet. */,
+	ETH_TX_ERR_ASSERT_MALICIOUS,
+	MAX_ETH_TX_ERR
+};
+
+struct eth_tx_err_vals {
+	__le16 values;
+#define ETH_TX_ERR_VALS_ILLEGAL_VLAN_MODE_MASK 0x1
+#define ETH_TX_ERR_VALS_ILLEGAL_VLAN_MODE_SHIFT 0
+#define ETH_TX_ERR_VALS_PACKET_TOO_SMALL_MASK 0x1
+#define ETH_TX_ERR_VALS_PACKET_TOO_SMALL_SHIFT 1
+#define ETH_TX_ERR_VALS_ANTI_SPOOFING_ERR_MASK 0x1
+#define ETH_TX_ERR_VALS_ANTI_SPOOFING_ERR_SHIFT 2
+#define ETH_TX_ERR_VALS_ILLEGAL_INBAND_TAGS_MASK 0x1
+#define ETH_TX_ERR_VALS_ILLEGAL_INBAND_TAGS_SHIFT 3
+#define ETH_TX_ERR_VALS_VLAN_INSERTION_W_INBAND_TAG_MASK 0x1
+#define ETH_TX_ERR_VALS_VLAN_INSERTION_W_INBAND_TAG_SHIFT 4
+#define ETH_TX_ERR_VALS_MTU_VIOLATION_MASK 0x1
+#define ETH_TX_ERR_VALS_MTU_VIOLATION_SHIFT 5
+#define ETH_TX_ERR_VALS_ILLEGAL_CONTROL_FRAME_MASK 0x1
+#define ETH_TX_ERR_VALS_ILLEGAL_CONTROL_FRAME_SHIFT 6
+#define ETH_TX_ERR_VALS_RESERVED_MASK 0x1FF
+#define ETH_TX_ERR_VALS_RESERVED_SHIFT 7
+};
+
 struct eth_vport_rss_config {
 	__le16 capabilities;
 #define ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY_MASK 0x1
@@ -2669,12 +2879,8 @@ struct eth_vport_rss_config {
 #define ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY_SHIFT 5
 #define ETH_VPORT_RSS_CONFIG_EN_5_TUPLE_CAPABILITY_MASK 0x1
 #define ETH_VPORT_RSS_CONFIG_EN_5_TUPLE_CAPABILITY_SHIFT 6
-#define ETH_VPORT_RSS_CONFIG_CALC_4TUP_TCP_FRAG_MASK 0x1
-#define ETH_VPORT_RSS_CONFIG_CALC_4TUP_TCP_FRAG_SHIFT 7
-#define ETH_VPORT_RSS_CONFIG_CALC_4TUP_UDP_FRAG_MASK 0x1
-#define ETH_VPORT_RSS_CONFIG_CALC_4TUP_UDP_FRAG_SHIFT 8
-#define ETH_VPORT_RSS_CONFIG_RESERVED0_MASK 0x7F
-#define ETH_VPORT_RSS_CONFIG_RESERVED0_SHIFT 9
+#define ETH_VPORT_RSS_CONFIG_RESERVED0_MASK 0x1FF
+#define ETH_VPORT_RSS_CONFIG_RESERVED0_SHIFT 7
 	u8 rss_id;
 	u8 rss_mode;
 	u8 update_rss_key;
@@ -2749,10 +2955,14 @@ struct rx_queue_start_ramrod_data {
 	u8 pxp_tph_valid_pkt;
 	u8 pxp_st_hint;
 	__le16 pxp_st_index;
-	u8 reserved[4];
-	struct regpair cqe_pbl_addr;
-	struct regpair bd_base;
-	struct regpair sge_base;
+	u8 pmd_mode;
+	u8 notify_en;
+	u8 toggle_val;
+	u8 reserved[7];
+	__le16 reserved1;
+	struct regpair cqe_pbl_addr;
+	struct regpair bd_base;
+	struct regpair reserved2;
 };
 
 struct rx_queue_stop_ramrod_data {
@@ -2764,23 +2974,24 @@
 };
 
 struct rx_queue_update_ramrod_data {
-	__le16 rx_queue_id;
-	u8 complete_cqe_flg;
-	u8 complete_event_flg;
-	u8 init_sge_ring_flg;
-	u8 vport_id;
-	u8 pxp_tph_valid_sge;
-	u8 pxp_st_hint;
-	__le16 pxp_st_index;
-	u8 reserved[6];
-	struct regpair sge_base;
+	__le16 rx_queue_id;
+	u8 complete_cqe_flg;
+	u8 complete_event_flg;
+	u8 vport_id;
+	u8 reserved[4];
+	u8 reserved1;
+	u8 reserved2;
+	u8 reserved3;
+	__le16 reserved4;
+	__le16 reserved5;
+	struct regpair reserved6;
 };
 
 struct tx_queue_start_ramrod_data {
 	__le16 sb_id;
 	u8 sb_index;
 	u8 vport_id;
-	u8 tc;
+	u8 reserved0;
 	u8 stats_counter_id;
 	__le16 qm_pq_id;
 	u8 flags;
@@ -2790,18 +3001,25 @@
 #define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_PKT_DUP_SHIFT 1
 #define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_TX_DEST_MASK 0x1
 #define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_TX_DEST_SHIFT 2
-#define TX_QUEUE_START_RAMROD_DATA_RESERVED0_MASK 0x1F
-#define TX_QUEUE_START_RAMROD_DATA_RESERVED0_SHIFT 3
-	u8 pin_context;
-	u8 pxp_tph_valid_bd;
-	u8 pxp_tph_valid_pkt;
-	__le16 pxp_st_index;
-	u8 pxp_st_hint;
-	u8 reserved1[3];
-	__le16 queue_zone_id;
-	__le16 test_dup_count;
-	__le16 pbl_size;
-	struct regpair pbl_base_addr;
+#define TX_QUEUE_START_RAMROD_DATA_PMD_MODE_MASK 0x1
+#define TX_QUEUE_START_RAMROD_DATA_PMD_MODE_SHIFT 3
+#define TX_QUEUE_START_RAMROD_DATA_NOTIFY_EN_MASK 0x1
+#define TX_QUEUE_START_RAMROD_DATA_NOTIFY_EN_SHIFT 4
+#define TX_QUEUE_START_RAMROD_DATA_PIN_CONTEXT_MASK 0x1
+#define TX_QUEUE_START_RAMROD_DATA_PIN_CONTEXT_SHIFT 5
+#define TX_QUEUE_START_RAMROD_DATA_RESERVED1_MASK 0x3
+#define TX_QUEUE_START_RAMROD_DATA_RESERVED1_SHIFT 6
+	u8 pxp_st_hint;
+	u8 pxp_tph_valid_bd;
+	u8 pxp_tph_valid_pkt;
+	__le16 pxp_st_index;
+	__le16 comp_agg_size;
+	__le16 queue_zone_id;
+	__le16 test_dup_count;
+	__le16 pbl_size;
+	__le16 tx_queue_id;
+	struct regpair pbl_base_addr;
+	struct regpair bd_cons_address;
 };
 
 struct tx_queue_stop_ramrod_data {
@@ -2822,16 +3040,16 @@ struct vport_start_ramrod_data {
 	struct eth_vport_rx_mode rx_mode;
 	struct eth_vport_tx_mode tx_mode;
 	struct eth_vport_tpa_param tpa_param;
-	__le16 sge_buff_size;
-	u8 max_sges_num;
-	u8 tx_switching_en;
-	u8 anti_spoofing_en;
-	u8 default_vlan_en;
-	u8 handle_ptp_pkts;
-	u8 silent_vlan_removal_en;
-	__le16 default_vlan;
-	u8 untagged;
-	u8 reserved[7];
+	__le16 default_vlan;
+	u8 tx_switching_en;
+	u8 anti_spoofing_en;
+	u8 default_vlan_en;
+	u8 handle_ptp_pkts;
+	u8 silent_vlan_removal_en;
+	u8 untagged;
+	struct eth_tx_err_vals tx_err_behav;
+	u8 zero_placement_offset;
+	u8 reserved[7];
 };
 
 struct vport_stop_ramrod_data {
@@ -2840,36 +3058,35 @@
 };
 
 struct vport_update_ramrod_data_cmn {
-	u8 vport_id;
-	u8 update_rx_active_flg;
-	u8 rx_active_flg;
-	u8 update_tx_active_flg;
-	u8 tx_active_flg;
-	u8 update_rx_mode_flg;
-	u8 update_tx_mode_flg;
-	u8 update_approx_mcast_flg;
-	u8 update_rss_flg;
-	u8 update_inner_vlan_removal_en_flg;
-	u8 inner_vlan_removal_en;
-	u8 update_tpa_param_flg;
-	u8 update_tpa_en_flg;
-	u8 update_sge_param_flg;
-	__le16 sge_buff_size;
-	u8 max_sges_num;
-	u8 update_tx_switching_en_flg;
-	u8 tx_switching_en;
-	u8 update_anti_spoofing_en_flg;
-	u8 anti_spoofing_en;
-	u8 update_handle_ptp_pkts;
-	u8 handle_ptp_pkts;
-	u8 update_default_vlan_en_flg;
-	u8 default_vlan_en;
-	u8 update_default_vlan_flg;
-	__le16 default_vlan;
-	u8 update_accept_any_vlan_flg;
-	u8 accept_any_vlan;
-	u8 silent_vlan_removal_en;
-	u8 reserved;
+	u8 vport_id;
+	u8 update_rx_active_flg;
+	u8 rx_active_flg;
+	u8 update_tx_active_flg;
+	u8 tx_active_flg;
+	u8 update_rx_mode_flg;
+	u8 update_tx_mode_flg;
+	u8 update_approx_mcast_flg;
+	u8 update_rss_flg;
+	u8 update_inner_vlan_removal_en_flg;
+	u8 inner_vlan_removal_en;
+	u8 update_tpa_param_flg;
+	u8 update_tpa_en_flg;
+	u8 update_tx_switching_en_flg;
+	u8 tx_switching_en;
+	u8 update_anti_spoofing_en_flg;
+	u8 anti_spoofing_en;
+	u8 update_handle_ptp_pkts;
+	u8 handle_ptp_pkts;
+	u8 update_default_vlan_en_flg;
+	u8 default_vlan_en;
+	u8 update_default_vlan_flg;
+	__le16 default_vlan;
+	u8 update_accept_any_vlan_flg;
+	u8 accept_any_vlan;
+	u8 silent_vlan_removal_en;
+	u8 update_mtu_flg;
+	__le16 mtu;
+	u8 reserved[2];
 };
 
 struct vport_update_ramrod_mcast {
@@ -2885,436 +3102,6 @@ struct vport_update_ramrod_data {
 	struct eth_vport_rss_config rss_config;
 };
 
-struct mstorm_eth_conn_ag_ctx {
-	u8 byte0 /* cdu_validation */;
-	u8 byte1 /* state */;
-	u8 flags0;
-#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 /* exist_in_qm0 */
-#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define MSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
-#define MSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
-#define MSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 /* cf0 */
-#define MSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 2
-#define MSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 /* cf1 */
-#define MSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 4
-#define MSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 /* cf2 */
-#define MSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
-	u8 flags1;
-#define MSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
-#define MSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 0
-#define MSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
-#define MSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 1
-#define MSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
-#define MSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
-#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
-#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
-#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
-#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
-#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
-#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7
-	__le16 word0 /* word0 */;
-	__le16 word1 /* word1 */;
-	__le32 reg0 /* reg0 */;
-	__le32 reg1 /* reg1 */;
-};
-
-struct tstorm_eth_conn_ag_ctx {
-	u8 byte0 /* cdu_validation */;
-	u8 byte1 /* state */;
-	u8 flags0;
-#define TSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
-#define TSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
-#define TSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
-#define TSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
-#define TSTORM_ETH_CONN_AG_CTX_BIT2_MASK 0x1 /* bit2 */
-#define TSTORM_ETH_CONN_AG_CTX_BIT2_SHIFT 2
-#define TSTORM_ETH_CONN_AG_CTX_BIT3_MASK 0x1 /* bit3 */
-#define TSTORM_ETH_CONN_AG_CTX_BIT3_SHIFT 3
-#define TSTORM_ETH_CONN_AG_CTX_BIT4_MASK 0x1 /* bit4 */
-#define TSTORM_ETH_CONN_AG_CTX_BIT4_SHIFT 4
-#define TSTORM_ETH_CONN_AG_CTX_BIT5_MASK 0x1 /* bit5 */
-#define TSTORM_ETH_CONN_AG_CTX_BIT5_SHIFT 5
-#define TSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */
-#define TSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 6
-	u8 flags1;
-#define TSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */
-#define TSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 0
-#define TSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */
-#define TSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 2
-#define TSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */
-#define TSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 4
-#define TSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
-#define TSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 6
-	u8 flags2;
-#define TSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
-#define TSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 0
-#define TSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
-#define TSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 2
-#define TSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */
-#define TSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 4
-#define TSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */
-#define TSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 6
-	u8 flags3;
-#define TSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */
-#define TSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 0
-#define TSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */
-#define TSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 2
-#define TSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
-#define TSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 4
-#define TSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
-#define TSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 5
-#define TSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
-#define TSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 6
-#define TSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */
-#define TSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 7
-	u8 flags4;
-#define TSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */
-#define TSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 0
-#define TSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */
-#define TSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 1
-#define TSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */
-#define TSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 2
-#define TSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1 /* cf7en */
-#define TSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 3
-#define TSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1 /* cf8en */
-#define TSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 4
-#define TSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1 /* cf9en */
-#define TSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 5
-#define TSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1 /* cf10en */
-#define TSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 6
-#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
-#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7
-	u8 flags5;
-#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
-#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
-#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
-#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
-#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
-#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_MASK 0x1 /* rule6en */
-#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_SHIFT 5
-#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
-#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define TSTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */
-#define TSTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7
-	__le32 reg0 /* reg0 */;
-	__le32 reg1 /* reg1 */;
-	__le32 reg2 /* reg2 */;
-	__le32 reg3 /* reg3 */;
-	__le32 reg4 /* reg4 */;
-	__le32 reg5 /* reg5 */;
-	__le32 reg6 /* reg6 */;
-	__le32 reg7 /* reg7 */;
-	__le32 reg8 /* reg8 */;
-	u8 byte2 /* byte2 */;
-	u8 byte3 /* byte3 */;
-	__le16 rx_bd_cons /* word0 */;
-	u8 byte4 /* byte4 */;
-	u8 byte5 /* byte5 */;
-	__le16 rx_bd_prod /* word1 */;
-	__le16 word2 /* conn_dpi */;
-	__le16 word3 /* word3 */;
-	__le32 reg9 /* reg9 */;
-	__le32 reg10 /* reg10 */;
-};
-
-struct ustorm_eth_conn_ag_ctx {
-	u8 byte0 /* cdu_validation */;
-	u8 byte1 /* state */;
-	u8 flags0;
-#define USTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1
-#define USTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
-#define USTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
-#define USTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
-#define USTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */
-#define USTORM_ETH_CONN_AG_CTX_CF0_SHIFT 2
-#define USTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */
-#define USTORM_ETH_CONN_AG_CTX_CF1_SHIFT 4
-#define USTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */
-#define USTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
-	u8 flags1;
-#define USTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3
-#define USTORM_ETH_CONN_AG_CTX_CF3_SHIFT 0
-#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_MASK 0x3 /* cf4 */
-#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_SHIFT 2
-#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_MASK 0x3 /* cf5 */
-#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_SHIFT 4
-#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3 /* cf6 */
-#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 6
-	u8 flags2;
-#define USTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
-#define USTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 0
-#define USTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
-#define USTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 1
-#define USTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
-#define USTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
-#define USTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */
-#define USTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 3
-#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_MASK 0x1 /* cf4en */
-#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_SHIFT 4
-#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_MASK 0x1 /* cf5en */
-#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_SHIFT 5
-#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1 /* cf6en */
-#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 6
-#define USTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
-#define USTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7
-	u8 flags3;
-#define USTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
-#define USTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define USTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
-#define USTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define USTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
-#define USTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define USTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
-#define USTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define USTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
-#define USTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define USTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */
-#define USTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define USTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
-#define USTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define USTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */
-#define USTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7
-	u8 byte2 /* byte2 */;
-	u8 byte3 /* byte3 */;
-	__le16 word0 /* conn_dpi */;
-	__le16 tx_bd_cons /* word1 */;
-	__le32 reg0 /* reg0 */;
-	__le32 reg1 /* reg1 */;
-	__le32 reg2 /* reg2 */;
-	__le32 reg3 /* reg3 */;
-	__le16 tx_drv_bd_cons /* word2 */;
-	__le16 rx_drv_cqe_cons /* word3 */;
-};
-
-struct xstorm_eth_hw_conn_ag_ctx {
-	u8 reserved0 /* cdu_validation */;
-	u8 eth_state /* state */;
-	u8 flags0;
-#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_SHIFT 1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_SHIFT 2
-#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_SHIFT 4
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_SHIFT 5
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_SHIFT 6
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_SHIFT 7
-	u8 flags1;
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_SHIFT 0
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_SHIFT 1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_SHIFT 2
-#define XSTORM_ETH_HW_CONN_AG_CTX_BIT11_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_BIT11_SHIFT 3
-#define XSTORM_ETH_HW_CONN_AG_CTX_BIT12_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_BIT12_SHIFT 4
-#define XSTORM_ETH_HW_CONN_AG_CTX_BIT13_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_BIT13_SHIFT 5
-#define XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
-#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
-	u8 flags2;
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF0_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF0_SHIFT 0
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF1_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF1_SHIFT 2
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF2_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF2_SHIFT 4
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF3_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF3_SHIFT 6
-	u8 flags3;
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF4_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF4_SHIFT 0
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF5_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF5_SHIFT 2
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF6_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF6_SHIFT 4
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF7_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF7_SHIFT 6
-	u8 flags4;
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF8_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF8_SHIFT 0
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF9_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF9_SHIFT 2
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF10_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF10_SHIFT 4
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF11_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF11_SHIFT 6
-	u8 flags5;
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF12_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF12_SHIFT 0
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF13_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF13_SHIFT 2
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF14_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF14_SHIFT 4
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF15_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF15_SHIFT 6
-	u8 flags6;
-#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT 0
-#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT 2
-#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_SHIFT 4
-#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_SHIFT 6
-	u8 flags7;
-#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_SHIFT 2
-#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_SHIFT 4
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_SHIFT 6
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_SHIFT 7
-	u8 flags8;
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_SHIFT 0
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_SHIFT 1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_SHIFT 2
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_SHIFT 3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_SHIFT 4
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_SHIFT 5
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_SHIFT 6
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_SHIFT 7
-	u8 flags9;
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_SHIFT 0
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_SHIFT 1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_SHIFT 2
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_SHIFT 3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_SHIFT 4
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_SHIFT 5
-#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT 6
-#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT 7
-	u8 flags10;
-#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
-#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1
-#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_SHIFT 3
-#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
-#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_SHIFT 6
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_SHIFT 7
-	u8 flags11;
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_SHIFT 0
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_SHIFT 1
-#define XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_SHIFT 3
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_SHIFT 4
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_SHIFT 5
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_SHIFT 7
-	u8 flags12;
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_SHIFT 0
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_SHIFT 1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_SHIFT 4
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_SHIFT 5
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_SHIFT 6
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_SHIFT 7
-	u8 flags13;
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_SHIFT 0
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_SHIFT 1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
-	u8 flags14;
-#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT 0
-#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT 1
-#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT 2
-#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT 3
-#define XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT 4
-#define XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
-#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_SHIFT 6
-	u8 edpm_event_id /* byte2 */;
-	__le16 physical_q0 /* physical_q0 */;
-	__le16 word1 /* physical_q1 */;
-	__le16 edpm_num_bds /* physical_q2 */;
-	__le16 tx_bd_cons /* word3 */;
-	__le16 tx_bd_prod /* word4 */;
-	__le16 go_to_bd_cons /* word5 */;
-	__le16 conn_dpi /* conn_dpi */;
-};
-
 #define VF_MAX_STATIC 192 /* In case of K2 */
 
 #define MCP_GLOB_PATH_MAX 2
 
@@ -3818,6 +3605,10 @@ struct public_port {
 	struct dcbx_local_params local_admin_dcbx_mib;
 	struct dcbx_mib remote_dcbx_mib;
 	struct dcbx_mib operational_dcbx_mib;
+
+	u32 fc_npiv_nvram_tbl_addr;
+	u32 fc_npiv_nvram_tbl_size;
+	u32 transceiver_data;
 };
 
 /**************************************/
 
@@ -3830,7 +3621,11 @@ struct public_func {
 	u32 iscsi_boot_signature;
 	u32 iscsi_boot_block_offset;
 
-	u32 reserved[8];
+	u32 mtu_size;
+	u32 c2s_pcp_map_lower;
+	u32 c2s_pcp_map_upper;
+	u32 c2s_pcp_map_default;
+	u32 reserved[4];
 
 	u32 config;
 
@@ -3894,10 +3689,10 @@ struct public_func {
 #define DRV_ID_MCP_HSI_VER_SHIFT 16
 #define DRV_ID_MCP_HSI_VER_CURRENT BIT(DRV_ID_MCP_HSI_VER_SHIFT)
 
-#define DRV_ID_DRV_TYPE_MASK 0xff000000
+#define DRV_ID_DRV_TYPE_MASK 0x7f000000
 #define DRV_ID_DRV_TYPE_SHIFT 24
 #define DRV_ID_DRV_TYPE_UNKNOWN (0 << DRV_ID_DRV_TYPE_SHIFT)
-#define DRV_ID_DRV_TYPE_LINUX BIT(DRV_ID_DRV_TYPE_SHIFT)
+#define DRV_ID_DRV_TYPE_LINUX (1 << DRV_ID_DRV_TYPE_SHIFT)
 #define DRV_ID_DRV_TYPE_WINDOWS (2 << DRV_ID_DRV_TYPE_SHIFT)
 #define DRV_ID_DRV_TYPE_DIAG (3 << DRV_ID_DRV_TYPE_SHIFT)
 #define DRV_ID_DRV_TYPE_PREBOOT (4 << DRV_ID_DRV_TYPE_SHIFT)
@@ -3905,6 +3700,10 @@ struct public_func {
 #define DRV_ID_DRV_TYPE_VMWARE (6 << DRV_ID_DRV_TYPE_SHIFT)
 #define DRV_ID_DRV_TYPE_FREEBSD (7 << DRV_ID_DRV_TYPE_SHIFT)
 #define DRV_ID_DRV_TYPE_AIX (8 << DRV_ID_DRV_TYPE_SHIFT)
+
+#define DRV_ID_DRV_INIT_HW_MASK 0x80000000
+#define DRV_ID_DRV_INIT_HW_SHIFT 31
+#define DRV_ID_DRV_INIT_HW_FLAG BIT(DRV_ID_DRV_INIT_HW_SHIFT)
 };
 
 /**************************************/
 
@@ -3964,6 +3763,7 @@ struct public_drv_mb {
 #define DRV_MSG_CODE_MASK 0xffff0000
 #define DRV_MSG_CODE_LOAD_REQ 0x10000000
 #define DRV_MSG_CODE_LOAD_DONE 0x11000000
+#define DRV_MSG_CODE_INIT_HW 0x12000000
 #define DRV_MSG_CODE_UNLOAD_REQ 0x20000000
 #define DRV_MSG_CODE_UNLOAD_DONE 0x21000000
 #define DRV_MSG_CODE_INIT_PHY 0x22000000
 
@@ -4100,6 +3900,7 @@ struct public_drv_mb {
 #define FW_MSG_CODE_SET_SECURE_MODE_ERROR 0x00130000
 #define FW_MSG_CODE_SET_SECURE_MODE_OK 0x00140000
 #define FW_MSG_MODE_PHY_PRIVILEGE_ERROR 0x00150000
+#define FW_MSG_CODE_OK 0x00160000
 
 #define FW_MSG_SEQ_NUMBER_MASK 0x0000ffff
 
@@ -4212,7 +4013,7 @@ struct nvm_cfg1_glob {
 #define NVM_CFG1_GLOB_MF_MODE_MASK 0x00000FF0
 #define NVM_CFG1_GLOB_MF_MODE_OFFSET 4
 #define NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED 0x0
-#define NVM_CFG1_GLOB_MF_MODE_FORCED_SF 0x1
+#define NVM_CFG1_GLOB_MF_MODE_DEFAULT 0x1
#define NVM_CFG1_GLOB_MF_MODE_SPIO4 0x2
 #define NVM_CFG1_GLOB_MF_MODE_NPAR1_0 0x3
 #define NVM_CFG1_GLOB_MF_MODE_NPAR1_5 0x4
 
@@ -4643,8 +4444,12 @@ struct nvm_cfg1_glob {
 #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO29 0x1E
 #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO30 0x1F
 #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO31 0x20
-
-	u32 reserved[46]; /* 0x88 */
+	u32 device_capabilities; /* 0x88 */
+#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET 0x1
+	u32 power_dissipated; /* 0x8C */
+	u32 power_consumed; /* 0x90 */
+	u32 efi_version; /* 0x94 */
+	u32 reserved[42]; /* 0x98 */
 };
 
 struct nvm_cfg1_path {
@@ -4652,26 +4457,8 @@
 };
 
 struct nvm_cfg1_port {
-	u32 power_dissipated; /* 0x0 */
-#define NVM_CFG1_PORT_POWER_DIS_D0_MASK 0x000000FF
-#define NVM_CFG1_PORT_POWER_DIS_D0_OFFSET 0
-#define NVM_CFG1_PORT_POWER_DIS_D1_MASK 0x0000FF00
-#define NVM_CFG1_PORT_POWER_DIS_D1_OFFSET 8
-#define NVM_CFG1_PORT_POWER_DIS_D2_MASK 0x00FF0000
-#define NVM_CFG1_PORT_POWER_DIS_D2_OFFSET 16
-#define NVM_CFG1_PORT_POWER_DIS_D3_MASK 0xFF000000
-#define NVM_CFG1_PORT_POWER_DIS_D3_OFFSET 24
-
-	u32 power_consumed; /* 0x4 */
-#define NVM_CFG1_PORT_POWER_CONS_D0_MASK 0x000000FF
-#define NVM_CFG1_PORT_POWER_CONS_D0_OFFSET 0
-#define NVM_CFG1_PORT_POWER_CONS_D1_MASK 0x0000FF00
-#define NVM_CFG1_PORT_POWER_CONS_D1_OFFSET 8
-#define NVM_CFG1_PORT_POWER_CONS_D2_MASK 0x00FF0000
-#define NVM_CFG1_PORT_POWER_CONS_D2_OFFSET 16
-#define NVM_CFG1_PORT_POWER_CONS_D3_MASK 0xFF000000
-#define NVM_CFG1_PORT_POWER_CONS_D3_OFFSET 24
-
+	u32 reserved__m_relocated_to_option_123; /* 0x0 */
+	u32 reserved__m_relocated_to_option_124; /* 0x4 */
 	u32 generic_cont0; /* 0x8 */
 #define NVM_CFG1_PORT_LED_MODE_MASK 0x000000FF
 #define NVM_CFG1_PORT_LED_MODE_OFFSET 0
 
@@ -4699,7 +4486,9 @@ struct nvm_cfg1_port {
 #define NVM_CFG1_PORT_DCBX_MODE_IEEE 0x1
 #define NVM_CFG1_PORT_DCBX_MODE_CEE 0x2
 #define NVM_CFG1_PORT_DCBX_MODE_DYNAMIC 0x3
-
+#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_MASK 0x00F00000
+#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_OFFSET 20
+#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_ETHERNET 0x1
 	u32 pcie_cfg; /* 0xC */
 #define NVM_CFG1_PORT_RESERVED15_MASK 0x00000007
 #define NVM_CFG1_PORT_RESERVED15_OFFSET 0
 
@@ -4784,10 +4573,11 @@ struct nvm_cfg1_port {
 #define NVM_CFG1_PORT_SERDES_NET_INTERFACE_SFI 0x9
 #define NVM_CFG1_PORT_SERDES_NET_INTERFACE_1000X 0xB
 #define NVM_CFG1_PORT_SERDES_NET_INTERFACE_SGMII 0xC
-#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_XLAUI 0xD
-#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_CAUI 0xE
-#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_XLPPI 0xF
-#define
NVM_CFG1_PORT_SERDES_NET_INTERFACE_CPPI 0x10 +#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_XLAUI 0x11 +#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_XLPPI 0x12 +#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_CAUI 0x21 +#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_CPPI 0x22 +#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_25GAUI 0x31 #define NVM_CFG1_PORT_AN_MODE_MASK 0xFF000000 #define NVM_CFG1_PORT_AN_MODE_OFFSET 24 #define NVM_CFG1_PORT_AN_MODE_NONE 0x0 @@ -4801,9 +4591,6 @@ struct nvm_cfg1_port { u32 mgmt_traffic; /* 0x20 */ #define NVM_CFG1_PORT_RESERVED61_MASK 0x0000000F #define NVM_CFG1_PORT_RESERVED61_OFFSET 0 -#define NVM_CFG1_PORT_RESERVED61_DISABLED 0x0 -#define NVM_CFG1_PORT_RESERVED61_NCSI_OVER_RMII 0x1 -#define NVM_CFG1_PORT_RESERVED61_NCSI_OVER_SMBUS 0x2 u32 ext_phy; /* 0x24 */ #define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_MASK 0x000000FF @@ -4814,16 +4601,12 @@ struct nvm_cfg1_port { #define NVM_CFG1_PORT_EXTERNAL_PHY_ADDRESS_OFFSET 8 u32 mba_cfg1; /* 0x28 */ -#define NVM_CFG1_PORT_MBA_MASK 0x00000001 -#define NVM_CFG1_PORT_MBA_OFFSET 0 -#define NVM_CFG1_PORT_MBA_DISABLED 0x0 -#define NVM_CFG1_PORT_MBA_ENABLED 0x1 -#define NVM_CFG1_PORT_MBA_BOOT_TYPE_MASK 0x00000006 -#define NVM_CFG1_PORT_MBA_BOOT_TYPE_OFFSET 1 -#define NVM_CFG1_PORT_MBA_BOOT_TYPE_AUTO 0x0 -#define NVM_CFG1_PORT_MBA_BOOT_TYPE_BBS 0x1 -#define NVM_CFG1_PORT_MBA_BOOT_TYPE_INT18H 0x2 -#define NVM_CFG1_PORT_MBA_BOOT_TYPE_INT19H 0x3 +#define NVM_CFG1_PORT_PREBOOT_OPROM_MASK 0x00000001 +#define NVM_CFG1_PORT_PREBOOT_OPROM_OFFSET 0 +#define NVM_CFG1_PORT_PREBOOT_OPROM_DISABLED 0x0 +#define NVM_CFG1_PORT_PREBOOT_OPROM_ENABLED 0x1 +#define NVM_CFG1_PORT_RESERVED__M_MBA_BOOT_TYPE_MASK 0x00000006 +#define NVM_CFG1_PORT_RESERVED__M_MBA_BOOT_TYPE_OFFSET 1 #define NVM_CFG1_PORT_MBA_DELAY_TIME_MASK 0x00000078 #define NVM_CFG1_PORT_MBA_DELAY_TIME_OFFSET 3 #define NVM_CFG1_PORT_MBA_SETUP_HOT_KEY_MASK 0x00000080 @@ -4836,61 +4619,30 @@ struct nvm_cfg1_port { #define NVM_CFG1_PORT_MBA_HIDE_SETUP_PROMPT_ENABLED 0x1 #define NVM_CFG1_PORT_RESERVED5_MASK 0x0001FE00 #define NVM_CFG1_PORT_RESERVED5_OFFSET 9 -#define NVM_CFG1_PORT_RESERVED5_DISABLED 0x0 -#define NVM_CFG1_PORT_RESERVED5_2K 0x1 -#define NVM_CFG1_PORT_RESERVED5_4K 0x2 -#define NVM_CFG1_PORT_RESERVED5_8K 0x3 -#define NVM_CFG1_PORT_RESERVED5_16K 0x4 -#define NVM_CFG1_PORT_RESERVED5_32K 0x5 -#define NVM_CFG1_PORT_RESERVED5_64K 0x6 -#define NVM_CFG1_PORT_RESERVED5_128K 0x7 -#define NVM_CFG1_PORT_RESERVED5_256K 0x8 -#define NVM_CFG1_PORT_RESERVED5_512K 0x9 -#define NVM_CFG1_PORT_RESERVED5_1M 0xA -#define NVM_CFG1_PORT_RESERVED5_2M 0xB -#define NVM_CFG1_PORT_RESERVED5_4M 0xC -#define NVM_CFG1_PORT_RESERVED5_8M 0xD -#define NVM_CFG1_PORT_RESERVED5_16M 0xE -#define NVM_CFG1_PORT_RESERVED5_32M 0xF -#define NVM_CFG1_PORT_MBA_LINK_SPEED_MASK 0x001E0000 -#define NVM_CFG1_PORT_MBA_LINK_SPEED_OFFSET 17 -#define NVM_CFG1_PORT_MBA_LINK_SPEED_AUTONEG 0x0 -#define NVM_CFG1_PORT_MBA_LINK_SPEED_1G 0x1 -#define NVM_CFG1_PORT_MBA_LINK_SPEED_10G 0x2 -#define NVM_CFG1_PORT_MBA_LINK_SPEED_25G 0x4 -#define NVM_CFG1_PORT_MBA_LINK_SPEED_40G 0x5 -#define NVM_CFG1_PORT_MBA_LINK_SPEED_50G 0x6 -#define NVM_CFG1_PORT_MBA_LINK_SPEED_100G 0x7 -#define NVM_CFG1_PORT_MBA_BOOT_RETRY_COUNT_MASK 0x00E00000 -#define NVM_CFG1_PORT_MBA_BOOT_RETRY_COUNT_OFFSET 21 +#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_MASK 0x001E0000 +#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_OFFSET 17 +#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_AUTONEG 0x0 +#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_1G 0x1 +#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_10G 0x2 +#define 
NVM_CFG1_PORT_PREBOOT_LINK_SPEED_25G 0x4 +#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_40G 0x5 +#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_50G 0x6 +#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_100G 0x7 +#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_SMARTLINQ 0x8 +#define NVM_CFG1_PORT_RESERVED__M_MBA_BOOT_RETRY_COUNT_MASK 0x00E00000 +#define NVM_CFG1_PORT_RESERVED__M_MBA_BOOT_RETRY_COUNT_OFFSET 21 u32 mba_cfg2; /* 0x2C */ -#define NVM_CFG1_PORT_MBA_VLAN_VALUE_MASK 0x0000FFFF -#define NVM_CFG1_PORT_MBA_VLAN_VALUE_OFFSET 0 -#define NVM_CFG1_PORT_MBA_VLAN_MASK 0x00010000 -#define NVM_CFG1_PORT_MBA_VLAN_OFFSET 16 +#define NVM_CFG1_PORT_RESERVED65_MASK 0x0000FFFF +#define NVM_CFG1_PORT_RESERVED65_OFFSET 0 +#define NVM_CFG1_PORT_RESERVED66_MASK 0x00010000 +#define NVM_CFG1_PORT_RESERVED66_OFFSET 16 u32 vf_cfg; /* 0x30 */ #define NVM_CFG1_PORT_RESERVED8_MASK 0x0000FFFF #define NVM_CFG1_PORT_RESERVED8_OFFSET 0 #define NVM_CFG1_PORT_RESERVED6_MASK 0x000F0000 #define NVM_CFG1_PORT_RESERVED6_OFFSET 16 -#define NVM_CFG1_PORT_RESERVED6_DISABLED 0x0 -#define NVM_CFG1_PORT_RESERVED6_4K 0x1 -#define NVM_CFG1_PORT_RESERVED6_8K 0x2 -#define NVM_CFG1_PORT_RESERVED6_16K 0x3 -#define NVM_CFG1_PORT_RESERVED6_32K 0x4 -#define NVM_CFG1_PORT_RESERVED6_64K 0x5 -#define NVM_CFG1_PORT_RESERVED6_128K 0x6 -#define NVM_CFG1_PORT_RESERVED6_256K 0x7 -#define NVM_CFG1_PORT_RESERVED6_512K 0x8 -#define NVM_CFG1_PORT_RESERVED6_1M 0x9 -#define NVM_CFG1_PORT_RESERVED6_2M 0xA -#define NVM_CFG1_PORT_RESERVED6_4M 0xB -#define NVM_CFG1_PORT_RESERVED6_8M 0xC -#define NVM_CFG1_PORT_RESERVED6_16M 0xD -#define NVM_CFG1_PORT_RESERVED6_32M 0xE -#define NVM_CFG1_PORT_RESERVED6_64M 0xF struct nvm_cfg_mac_address lldp_mac_address; /* 0x34 */ @@ -4973,18 +4725,16 @@ struct nvm_cfg1_func { u32 device_id; /* 0x10 */ #define NVM_CFG1_FUNC_MF_VENDOR_DEVICE_ID_MASK 0x0000FFFF #define NVM_CFG1_FUNC_MF_VENDOR_DEVICE_ID_OFFSET 0 -#define NVM_CFG1_FUNC_VENDOR_DEVICE_ID_MASK 0xFFFF0000 -#define NVM_CFG1_FUNC_VENDOR_DEVICE_ID_OFFSET 16 +#define NVM_CFG1_FUNC_RESERVED77_MASK 0xFFFF0000 +#define NVM_CFG1_FUNC_RESERVED77_OFFSET 16 u32 cmn_cfg; /* 0x14 */ -#define NVM_CFG1_FUNC_MBA_BOOT_PROTOCOL_MASK 0x00000007 -#define NVM_CFG1_FUNC_MBA_BOOT_PROTOCOL_OFFSET 0 -#define NVM_CFG1_FUNC_MBA_BOOT_PROTOCOL_PXE 0x0 -#define NVM_CFG1_FUNC_MBA_BOOT_PROTOCOL_RPL 0x1 -#define NVM_CFG1_FUNC_MBA_BOOT_PROTOCOL_BOOTP 0x2 -#define NVM_CFG1_FUNC_MBA_BOOT_PROTOCOL_ISCSI_BOOT 0x3 -#define NVM_CFG1_FUNC_MBA_BOOT_PROTOCOL_FCOE_BOOT 0x4 -#define NVM_CFG1_FUNC_MBA_BOOT_PROTOCOL_NONE 0x7 +#define NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_MASK 0x00000007 +#define NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_OFFSET 0 +#define NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_PXE 0x0 +#define NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_ISCSI_BOOT 0x3 +#define NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_FCOE_BOOT 0x4 +#define NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_NONE 0x7 #define NVM_CFG1_FUNC_VF_PCI_DEVICE_ID_MASK 0x0007FFF8 #define NVM_CFG1_FUNC_VF_PCI_DEVICE_ID_OFFSET 3 #define NVM_CFG1_FUNC_PERSONALITY_MASK 0x00780000 @@ -5029,8 +4779,8 @@ struct nvm_cfg1_func { struct nvm_cfg_mac_address fcoe_node_wwn_mac_addr; /* 0x1C */ struct nvm_cfg_mac_address fcoe_port_wwn_mac_addr; /* 0x24 */ - - u32 reserved[9]; /* 0x2C */ + u32 preboot_generic_cfg; /* 0x2C */ + u32 reserved[8]; /* 0x30 */ }; struct nvm_cfg1 { diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c index 0b21a553cc7d..f55ebdc3c832 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c +++ 
b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c @@ -513,17 +513,14 @@ static int qed_pf_rl_rt_init(struct qed_hwfn *p_hwfn, * Return -1 on error. */ static int qed_vp_wfq_rt_init(struct qed_hwfn *p_hwfn, - u8 start_vport, u8 num_vports, struct init_qm_vport_params *vport_params) { - u8 tc, i, vport_id; u32 inc_val; + u8 tc, i; /* go over all PF VPORTs */ - for (i = 0, vport_id = start_vport; i < num_vports; i++, vport_id++) { - u32 temp = QM_REG_WFQVPUPPERBOUND_RT_OFFSET; - u16 *pq_ids = &vport_params[i].first_tx_pq_id[0]; + for (i = 0; i < num_vports; i++) { if (!vport_params[i].vport_wfq) continue; @@ -539,20 +536,16 @@ static int qed_vp_wfq_rt_init(struct qed_hwfn *p_hwfn, * different TCs */ for (tc = 0; tc < NUM_OF_TCS; tc++) { - u16 vport_pq_id = pq_ids[tc]; + u16 vport_pq_id = vport_params[i].first_tx_pq_id[tc]; if (vport_pq_id != QM_INVALID_PQ_ID) { STORE_RT_REG(p_hwfn, - QM_REG_WFQVPWEIGHT_RT_OFFSET + - vport_pq_id, inc_val); - STORE_RT_REG(p_hwfn, temp + vport_pq_id, - QM_WFQ_UPPER_BOUND | - QM_WFQ_CRD_REG_SIGN_BIT); - STORE_RT_REG(p_hwfn, QM_REG_WFQVPCRD_RT_OFFSET + vport_pq_id, - QM_WFQ_INIT_CRD(inc_val) | QM_WFQ_CRD_REG_SIGN_BIT); + STORE_RT_REG(p_hwfn, + QM_REG_WFQVPWEIGHT_RT_OFFSET + + vport_pq_id, inc_val); } } } @@ -709,8 +702,7 @@ int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn, if (qed_pf_rl_rt_init(p_hwfn, p_params->pf_id, p_params->pf_rl)) return -1; - if (qed_vp_wfq_rt_init(p_hwfn, p_params->start_vport, - p_params->num_vports, vport_params)) + if (qed_vp_wfq_rt_init(p_hwfn, p_params->num_vports, vport_params)) return -1; if (qed_vport_rl_rt_init(p_hwfn, p_params->start_vport, diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c index 796f1390e598..3269b3610e03 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c +++ b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c @@ -55,63 +55,98 @@ void qed_init_clear_rt_data(struct qed_hwfn *p_hwfn) int i; for (i = 0; i < RUNTIME_ARRAY_SIZE; i++) - p_hwfn->rt_data[i].b_valid = false; + p_hwfn->rt_data.b_valid[i] = false; } void qed_init_store_rt_reg(struct qed_hwfn *p_hwfn, u32 rt_offset, u32 val) { - p_hwfn->rt_data[rt_offset].init_val = val; - p_hwfn->rt_data[rt_offset].b_valid = true; + p_hwfn->rt_data.init_val[rt_offset] = val; + p_hwfn->rt_data.b_valid[rt_offset] = true; } void qed_init_store_rt_agg(struct qed_hwfn *p_hwfn, - u32 rt_offset, - u32 *val, + u32 rt_offset, u32 *p_val, size_t size) { size_t i; for (i = 0; i < size / sizeof(u32); i++) { - p_hwfn->rt_data[rt_offset + i].init_val = val[i]; - p_hwfn->rt_data[rt_offset + i].b_valid = true; + p_hwfn->rt_data.init_val[rt_offset + i] = p_val[i]; + p_hwfn->rt_data.b_valid[rt_offset + i] = true; } } -static void qed_init_rt(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u32 addr, - u32 rt_offset, - u32 size) +static int qed_init_rt(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 addr, + u16 rt_offset, + u16 size, + bool b_must_dmae) { - struct qed_rt_data *rt_data = p_hwfn->rt_data + rt_offset; - u32 i; + u32 *p_init_val = &p_hwfn->rt_data.init_val[rt_offset]; + bool *p_valid = &p_hwfn->rt_data.b_valid[rt_offset]; + u16 i, segment; + int rc = 0; + /* Since not all RT entries are initialized, go over the RT and + * for each segment of initialized values use DMA. 
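The qed_init_ops.c rework above replaces the array-of-structs rt_data (one {init_val, b_valid} pair per entry) with two parallel arrays, and the rewritten qed_init_rt(), whose body continues just below, uses that layout to push whole runs of consecutive valid entries through one qed_dmae_host2grc() call when wide-bus access forces DMA (single registers are still written directly otherwise). A minimal sketch of the run-coalescing idea, with flush() standing in for the DMA primitive and all names illustrative:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Stand-in for qed_dmae_host2grc(): push 'count' u32s at 'offset'. */
extern int flush(const uint32_t *vals, size_t offset, size_t count);

static int write_valid_runs(const uint32_t *init_val, const bool *valid,
			    size_t size)
{
	size_t i, seg;
	int rc;

	for (i = 0; i < size; i++) {
		if (!valid[i])
			continue;
		/* Extend the run while entries stay valid. */
		for (seg = 1; i + seg < size; seg++)
			if (!valid[i + seg])
				break;
		rc = flush(init_val + i, i, seg);
		if (rc)
			return rc;
		/* Skip the run and the invalid entry that ended it. */
		i += seg;
	}
	return 0;
}
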
+ */ for (i = 0; i < size; i++) { - if (!rt_data[i].b_valid) + if (!p_valid[i]) continue; - qed_wr(p_hwfn, p_ptt, addr + (i << 2), rt_data[i].init_val); + + /* In case there isn't any wide-bus configuration here, + * simply write the data instead of using dmae. + */ + if (!b_must_dmae) { + qed_wr(p_hwfn, p_ptt, addr + (i << 2), + p_init_val[i]); + continue; + } + + /* Start of a new segment */ + for (segment = 1; i + segment < size; segment++) + if (!p_valid[i + segment]) + break; + + rc = qed_dmae_host2grc(p_hwfn, p_ptt, + (uintptr_t)(p_init_val + i), + addr + (i << 2), segment, 0); + if (rc != 0) + return rc; + + /* Jump over the entire segment, including invalid entry */ + i += segment; } + + return rc; } int qed_init_alloc(struct qed_hwfn *p_hwfn) { - struct qed_rt_data *rt_data; + struct qed_rt_data *rt_data = &p_hwfn->rt_data; - rt_data = kzalloc(sizeof(*rt_data) * RUNTIME_ARRAY_SIZE, GFP_ATOMIC); - if (!rt_data) + rt_data->b_valid = kzalloc(sizeof(bool) * RUNTIME_ARRAY_SIZE, + GFP_KERNEL); + if (!rt_data->b_valid) return -ENOMEM; - p_hwfn->rt_data = rt_data; + rt_data->init_val = kzalloc(sizeof(u32) * RUNTIME_ARRAY_SIZE, + GFP_KERNEL); + if (!rt_data->init_val) { + kfree(rt_data->b_valid); + return -ENOMEM; + } return 0; } void qed_init_free(struct qed_hwfn *p_hwfn) { - kfree(p_hwfn->rt_data); - p_hwfn->rt_data = NULL; + kfree(p_hwfn->rt_data.init_val); + kfree(p_hwfn->rt_data.b_valid); } static int qed_init_array_dmae(struct qed_hwfn *p_hwfn, @@ -289,7 +324,8 @@ static int qed_init_cmd_wr(struct qed_hwfn *p_hwfn, case INIT_SRC_RUNTIME: qed_init_rt(p_hwfn, p_ptt, addr, le16_to_cpu(arg->runtime.offset), - le16_to_cpu(arg->runtime.size)); + le16_to_cpu(arg->runtime.size), + b_must_dmae); break; } @@ -316,49 +352,50 @@ static void qed_init_cmd_rd(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct init_read_op *cmd) { - u32 data = le32_to_cpu(cmd->op_data); - u32 addr = GET_FIELD(data, INIT_READ_OP_ADDRESS) << 2; + bool (*comp_check)(u32 val, u32 expected_val); + u32 delay = QED_INIT_POLL_PERIOD_US, val; + u32 data, addr, poll; + int i; + + data = le32_to_cpu(cmd->op_data); + addr = GET_FIELD(data, INIT_READ_OP_ADDRESS) << 2; + poll = GET_FIELD(data, INIT_READ_OP_POLL_TYPE); - bool (*comp_check)(u32 val, - u32 expected_val); - u32 delay = QED_INIT_POLL_PERIOD_US, val; val = qed_rd(p_hwfn, p_ptt, addr); - data = le32_to_cpu(cmd->op_data); - if (GET_FIELD(data, INIT_READ_OP_POLL)) { - int i; + if (poll == INIT_POLL_NONE) + return; - switch (GET_FIELD(data, INIT_READ_OP_POLL_COMP)) { - case INIT_COMPARISON_EQ: - comp_check = comp_eq; - break; - case INIT_COMPARISON_OR: - comp_check = comp_or; - break; - case INIT_COMPARISON_AND: - comp_check = comp_and; - break; - default: - comp_check = NULL; - DP_ERR(p_hwfn, "Invalid poll comparison type %08x\n", - data); - return; - } + switch (poll) { + case INIT_POLL_EQ: + comp_check = comp_eq; + break; + case INIT_POLL_OR: + comp_check = comp_or; + break; + case INIT_POLL_AND: + comp_check = comp_and; + break; + default: + DP_ERR(p_hwfn, "Invalid poll comparison type %08x\n", + cmd->op_data); + return; + } - for (i = 0; - i < QED_INIT_MAX_POLL_COUNT && - !comp_check(val, le32_to_cpu(cmd->expected_val)); - i++) { - udelay(delay); - val = qed_rd(p_hwfn, p_ptt, addr); - } + data = le32_to_cpu(cmd->expected_val); + for (i = 0; + i < QED_INIT_MAX_POLL_COUNT && !comp_check(val, data); + i++) { + udelay(delay); + val = qed_rd(p_hwfn, p_ptt, addr); + } - if (i == QED_INIT_MAX_POLL_COUNT) - DP_ERR(p_hwfn, - "Timeout when polling reg: 0x%08x [ Waiting-for: 
%08x Got: %08x (comparison %08x)]\n", - addr, le32_to_cpu(cmd->expected_val), - val, data); + if (i == QED_INIT_MAX_POLL_COUNT) { + DP_ERR(p_hwfn, + "Timeout when polling reg: 0x%08x [ Waiting-for: %08x Got: %08x (comparison %08x)]\n", + addr, le32_to_cpu(cmd->expected_val), + val, le32_to_cpu(cmd->op_data)); } } diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c index f72036a2ef5b..73feaf7eedb8 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c @@ -124,6 +124,8 @@ struct qed_sp_vport_update_params { u8 update_vport_active_tx_flg; u8 vport_active_tx_flg; u8 update_approx_mcast_flg; + u8 update_accept_any_vlan_flg; + u8 accept_any_vlan; unsigned long bins[8]; struct qed_rss_params *rss_params; struct qed_filter_accept_flags accept_flags; @@ -393,7 +395,9 @@ qed_sp_vport_update(struct qed_hwfn *p_hwfn, p_cmn->update_rx_active_flg = p_params->update_vport_active_rx_flg; p_cmn->tx_active_flg = p_params->vport_active_tx_flg; p_cmn->update_tx_active_flg = p_params->update_vport_active_tx_flg; - + p_cmn->accept_any_vlan = p_params->accept_any_vlan; + p_cmn->update_accept_any_vlan_flg = + p_params->update_accept_any_vlan_flg; rc = qed_sp_vport_update_rss(p_hwfn, p_ramrod, p_rss_params); if (rc) { /* Return spq entry which is taken in qed_sp_init_request()*/ @@ -444,8 +448,10 @@ static int qed_sp_vport_stop(struct qed_hwfn *p_hwfn, static int qed_filter_accept_cmd(struct qed_dev *cdev, u8 vport, struct qed_filter_accept_flags accept_flags, - enum spq_mode comp_mode, - struct qed_spq_comp_cb *p_comp_data) + u8 update_accept_any_vlan, + u8 accept_any_vlan, + enum spq_mode comp_mode, + struct qed_spq_comp_cb *p_comp_data) { struct qed_sp_vport_update_params vport_update_params; int i, rc; @@ -454,6 +460,8 @@ static int qed_filter_accept_cmd(struct qed_dev *cdev, memset(&vport_update_params, 0, sizeof(vport_update_params)); vport_update_params.vport_id = vport; vport_update_params.accept_flags = accept_flags; + vport_update_params.update_accept_any_vlan_flg = update_accept_any_vlan; + vport_update_params.accept_any_vlan = accept_any_vlan; for_each_hwfn(cdev, i) { struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; @@ -471,6 +479,10 @@ static int qed_filter_accept_cmd(struct qed_dev *cdev, "Accept filter configured, flags = [Rx]%x [Tx]%x\n", accept_flags.rx_accept_filter, accept_flags.tx_accept_filter); + if (update_accept_any_vlan) + DP_VERBOSE(p_hwfn, QED_MSG_SP, + "accept_any_vlan=%d configured\n", + accept_any_vlan); } return 0; @@ -714,7 +726,6 @@ qed_sp_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn, p_ramrod->sb_id = cpu_to_le16(p_params->sb); p_ramrod->sb_index = p_params->sb_idx; p_ramrod->stats_counter_id = stats_id; - p_ramrod->tc = p_pq_params->eth.tc; p_ramrod->pbl_size = cpu_to_le16(pbl_size); p_ramrod->pbl_base_addr.hi = DMA_HI_LE(pbl_addr); @@ -821,9 +832,8 @@ qed_filter_action(enum qed_filter_opcode opcode) case QED_FILTER_REMOVE: action = ETH_FILTER_ACTION_REMOVE; break; - case QED_FILTER_REPLACE: case QED_FILTER_FLUSH: - action = ETH_FILTER_ACTION_REPLACE; + action = ETH_FILTER_ACTION_REMOVE_ALL; break; default: action = MAX_ETH_FILTER_ACTION; @@ -892,8 +902,7 @@ qed_filter_ucast_common(struct qed_hwfn *p_hwfn, p_ramrod->filter_cmd_hdr.tx = p_filter_cmd->is_tx_filter ? 
1 : 0; switch (p_filter_cmd->opcode) { - case QED_FILTER_FLUSH: - p_ramrod->filter_cmd_hdr.cmd_cnt = 0; break; + case QED_FILTER_REPLACE: case QED_FILTER_MOVE: p_ramrod->filter_cmd_hdr.cmd_cnt = 2; break; default: @@ -962,6 +971,12 @@ qed_filter_ucast_common(struct qed_hwfn *p_hwfn, p_second_filter->action = ETH_FILTER_ACTION_ADD; p_second_filter->vport_id = vport_to_add_to; + } else if (p_filter_cmd->opcode == QED_FILTER_REPLACE) { + p_first_filter->vport_id = vport_to_add_to; + memcpy(p_second_filter, p_first_filter, + sizeof(*p_second_filter)); + p_first_filter->action = ETH_FILTER_ACTION_REMOVE_ALL; + p_second_filter->action = ETH_FILTER_ACTION_ADD; } else { action = qed_filter_action(p_filter_cmd->opcode); @@ -1344,6 +1359,9 @@ static int qed_update_vport(struct qed_dev *cdev, params->update_vport_active_flg; sp_params.vport_active_rx_flg = params->vport_active_flg; sp_params.vport_active_tx_flg = params->vport_active_flg; + sp_params.accept_any_vlan = params->accept_any_vlan; + sp_params.update_accept_any_vlan_flg = + params->update_accept_any_vlan_flg; /* RSS - is a bit tricky, since upper-layer isn't familiar with hwfns. * We need to re-fix the rss values per engine for CMT. @@ -1563,7 +1581,7 @@ static int qed_configure_filter_rx_mode(struct qed_dev *cdev, else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC) accept_flags.rx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED; - return qed_filter_accept_cmd(cdev, 0, accept_flags, + return qed_filter_accept_cmd(cdev, 0, accept_flags, false, false, QED_SPQ_MODE_CB, NULL); } diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index 9d76ce249277..593f8871adb6 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -190,7 +190,7 @@ int qed_fill_dev_info(struct qed_dev *cdev, dev_info->pci_mem_start = cdev->pci_params.mem_start; dev_info->pci_mem_end = cdev->pci_params.mem_end; dev_info->pci_irq = cdev->pci_params.irq; - dev_info->is_mf = IS_MF(&cdev->hwfns[0]); + dev_info->is_mf_default = IS_MF_DEFAULT(&cdev->hwfns[0]); ether_addr_copy(dev_info->hw_mac, cdev->hwfns[0].hw_info.hw_mac_addr); dev_info->fw_major = FW_MAJOR_VERSION; diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index ba1b1f1ef789..1457e30faccf 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -720,26 +720,25 @@ int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn, return -EINVAL; } - if (p_hwfn->cdev->mf_mode != SF) { - info->bandwidth_min = (shmem_info.config & - FUNC_MF_CFG_MIN_BW_MASK) >> - FUNC_MF_CFG_MIN_BW_SHIFT; - if (info->bandwidth_min < 1 || info->bandwidth_min > 100) { - DP_INFO(p_hwfn, - "bandwidth minimum out of bounds [%02x]. Set to 1\n", - info->bandwidth_min); - info->bandwidth_min = 1; - } - info->bandwidth_max = (shmem_info.config & - FUNC_MF_CFG_MAX_BW_MASK) >> - FUNC_MF_CFG_MAX_BW_SHIFT; - if (info->bandwidth_max < 1 || info->bandwidth_max > 100) { - DP_INFO(p_hwfn, - "bandwidth maximum out of bounds [%02x]. Set to 100\n", - info->bandwidth_max); - info->bandwidth_max = 100; - } + info->bandwidth_min = (shmem_info.config & + FUNC_MF_CFG_MIN_BW_MASK) >> + FUNC_MF_CFG_MIN_BW_SHIFT; + if (info->bandwidth_min < 1 || info->bandwidth_min > 100) { + DP_INFO(p_hwfn, + "bandwidth minimum out of bounds [%02x]. 
Set to 1\n", + info->bandwidth_min); + info->bandwidth_min = 1; + } + + info->bandwidth_max = (shmem_info.config & + FUNC_MF_CFG_MAX_BW_MASK) >> + FUNC_MF_CFG_MAX_BW_SHIFT; + if (info->bandwidth_max < 1 || info->bandwidth_max > 100) { + DP_INFO(p_hwfn, + "bandwidth maximum out of bounds [%02x]. Set to 100\n", + info->bandwidth_max); + info->bandwidth_max = 100; } if (shmem_info.mac_upper || shmem_info.mac_lower) { diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h index 287fadfab52d..8a83609c443c 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h @@ -343,7 +343,7 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn, */ int qed_sp_pf_start(struct qed_hwfn *p_hwfn, - enum mf_mode mode); + enum qed_mf_mode mode); /** * @brief qed_sp_pf_stop - PF Function Stop Ramrod diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c index 6f7879136633..33090f63548c 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c @@ -90,7 +90,7 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn, } int qed_sp_pf_start(struct qed_hwfn *p_hwfn, - enum mf_mode mode) + enum qed_mf_mode mode) { struct qed_sp_init_request_params params; struct pf_start_ramrod_data *p_ramrod = NULL; @@ -125,6 +125,18 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn, p_ramrod->dont_log_ramrods = 0; p_ramrod->log_type_mask = cpu_to_le16(0xf); p_ramrod->mf_mode = mode; + switch (mode) { + case QED_MF_DEFAULT: + case QED_MF_NPAR: + p_ramrod->mf_mode = MF_NPAR; + break; + case QED_MF_OVLAN: + p_ramrod->mf_mode = MF_OVLAN; + break; + default: + DP_NOTICE(p_hwfn, "Unsupported MF mode, init as DEFAULT\n"); + p_ramrod->mf_mode = MF_NPAR; + } p_ramrod->outer_tag = p_hwfn->hw_info.ovlan; /* Place EQ address in RAMROD */ @@ -142,9 +154,8 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn, p_hwfn->hw_info.personality = PERSONALITY_ETH; DP_VERBOSE(p_hwfn, QED_MSG_SPQ, - "Setting event_ring_sb [id %04x index %02x], mf [%s] outer_tag [%d]\n", + "Setting event_ring_sb [id %04x index %02x], outer_tag [%d]\n", sb, sb_index, - (p_ramrod->mf_mode == SF) ? "SF" : "Multi-Pf", p_ramrod->outer_tag); return qed_spq_post(p_hwfn, p_ent, NULL); diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h index 7c6caf7f6612..15c5528b4f39 100644 --- a/drivers/net/ethernet/qlogic/qede/qede.h +++ b/drivers/net/ethernet/qlogic/qede/qede.h @@ -100,6 +100,12 @@ struct qede_stats { u64 tx_mac_ctrl_frames; }; +struct qede_vlan { + struct list_head list; + u16 vid; + bool configured; +}; + struct qede_dev { struct qed_dev *cdev; struct net_device *ndev; @@ -154,6 +160,10 @@ struct qede_dev { u16 q_num_rx_buffers; /* Must be a power of two */ u16 q_num_tx_buffers; /* Must be a power of two */ + struct list_head vlan_list; + u16 configured_vlans; + u16 non_configured_vlans; + bool accept_any_vlan; struct delayed_work sp_task; unsigned long sp_flags; }; @@ -173,9 +183,9 @@ enum QEDE_STATE { * skb are built only after the frame was DMA-ed. 
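The qed_sp_pf_start() hunk above now translates the driver-level enum qed_mf_mode into the firmware's mf_mode, treating both QED_MF_DEFAULT and QED_MF_NPAR as MF_NPAR and warning-and-defaulting on anything unrecognized. A compact sketch of that mapping; the enum layouts here are stand-ins, not the real qed definitions:

/* Enum values are illustrative stand-ins. */
enum qed_mf_mode { QED_MF_DEFAULT, QED_MF_OVLAN, QED_MF_NPAR };
enum fw_mf_mode { MF_NPAR, MF_OVLAN };

static enum fw_mf_mode qed_to_fw_mf_mode(enum qed_mf_mode mode)
{
	switch (mode) {
	case QED_MF_OVLAN:
		return MF_OVLAN;
	case QED_MF_DEFAULT:
	case QED_MF_NPAR:
		return MF_NPAR;
	default:
		/* Unknown modes fall back to NPAR, as the ramrod setup does. */
		return MF_NPAR;
	}
}
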
*/ struct sw_rx_data { - u8 *data; - - DEFINE_DMA_UNMAP_ADDR(mapping); + struct page *data; + dma_addr_t mapping; + unsigned int page_offset; }; struct qede_rx_queue { @@ -188,6 +198,7 @@ struct qede_rx_queue { void __iomem *hw_rxq_prod_addr; int rx_buf_size; + unsigned int rx_buf_seg_size; u16 num_rx_buffers; u16 rxq_id; @@ -281,6 +292,7 @@ void qede_fill_by_demand_stats(struct qede_dev *edev); #define NUM_TX_BDS_MIN 128 #define NUM_TX_BDS_DEF NUM_TX_BDS_MAX +#define QEDE_RX_HDR_SIZE 256 #define for_each_rss(i) for (i = 0; i < edev->num_rss; i++) #endif /* _QEDE_H_ */ diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c index e442b85c9a5e..c49dc10ce151 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c @@ -217,9 +217,9 @@ static int qede_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) struct qed_link_params params; u32 speed; - if (edev->dev_info.common.is_mf) { + if (!edev->dev_info.common.is_mf_default) { DP_INFO(edev, - "Link parameters can not be changed in MF mode\n"); + "Link parameters can not be changed in non-default mode\n"); return -EOPNOTSUPP; } @@ -428,7 +428,7 @@ static int qede_set_pauseparam(struct net_device *dev, struct qed_link_params params; struct qed_link_output current_link; - if (!edev->dev_info.common.is_mf) { + if (!edev->dev_info.common.is_mf_default) { DP_INFO(edev, "Pause parameters can not be updated in non-default mode\n"); return -EOPNOTSUPP; diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index 6237f10b5119..5f15e23a0f7d 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -330,15 +330,15 @@ static void qede_set_params_for_ipv6_ext(struct sk_buff *skb, struct eth_tx_3rd_bd *third_bd) { u8 l4_proto; - u16 bd2_bits = 0, bd2_bits2 = 0; + u16 bd2_bits1 = 0, bd2_bits2 = 0; - bd2_bits2 |= (1 << ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT); + bd2_bits1 |= (1 << ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT); - bd2_bits |= ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) & + bd2_bits2 |= ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) & ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK) << ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT; - bd2_bits2 |= (ETH_L4_PSEUDO_CSUM_CORRECT_LENGTH << + bd2_bits1 |= (ETH_L4_PSEUDO_CSUM_CORRECT_LENGTH << ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT); if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) @@ -347,16 +347,15 @@ static void qede_set_params_for_ipv6_ext(struct sk_buff *skb, l4_proto = ip_hdr(skb)->protocol; if (l4_proto == IPPROTO_UDP) - bd2_bits2 |= 1 << ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT; + bd2_bits1 |= 1 << ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT; - if (third_bd) { + if (third_bd) third_bd->data.bitfields |= - ((tcp_hdrlen(skb) / 4) & - ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK) << - ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT; - } + cpu_to_le16(((tcp_hdrlen(skb) / 4) & + ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK) << + ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT); - second_bd->data.bitfields = cpu_to_le16(bd2_bits); + second_bd->data.bitfields1 = cpu_to_le16(bd2_bits1); second_bd->data.bitfields2 = cpu_to_le16(bd2_bits2); } @@ -464,12 +463,16 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb, /* Fill the parsing flags & params according to the requested offload */ if (xmit_type & XMIT_L4_CSUM) { + u16 temp = 1 << ETH_TX_DATA_1ST_BD_TUNN_CFG_OVERRIDE_SHIFT; + /* We don't re-calculate IP checksum as it is 
already done by * the upper stack */ first_bd->data.bd_flags.bitfields |= 1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT; + first_bd->data.bitfields |= cpu_to_le16(temp); + /* If the packet is IPv6 with extension header, indicate that * to FW and pass few params, since the device cracker doesn't * support parsing IPv6 with extension header/s. @@ -491,7 +494,7 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb, /* @@@TBD - if will not be removed need to check */ third_bd->data.bitfields |= - (1 << ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT); + cpu_to_le16((1 << ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT)); /* Make life easier for FW guys who can't deal with header and * data on same BD. If we need to split, use the second bd... @@ -719,26 +722,52 @@ static bool qede_has_tx_work(struct qede_fastpath *fp) return false; } -/* This function copies the Rx buffer from the CONS position to the PROD - * position, since we failed to allocate a new Rx buffer. +/* This function reuses the buffer(from an offset) from + * consumer index to producer index in the bd ring */ -static void qede_reuse_rx_data(struct qede_rx_queue *rxq) +static inline void qede_reuse_page(struct qede_dev *edev, + struct qede_rx_queue *rxq, + struct sw_rx_data *curr_cons) { - struct eth_rx_bd *rx_bd_cons = qed_chain_consume(&rxq->rx_bd_ring); struct eth_rx_bd *rx_bd_prod = qed_chain_produce(&rxq->rx_bd_ring); - struct sw_rx_data *sw_rx_data_cons = - &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX]; - struct sw_rx_data *sw_rx_data_prod = - &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX]; + struct sw_rx_data *curr_prod; + dma_addr_t new_mapping; - dma_unmap_addr_set(sw_rx_data_prod, mapping, - dma_unmap_addr(sw_rx_data_cons, mapping)); + curr_prod = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX]; + *curr_prod = *curr_cons; - sw_rx_data_prod->data = sw_rx_data_cons->data; - memcpy(rx_bd_prod, rx_bd_cons, sizeof(struct eth_rx_bd)); + new_mapping = curr_prod->mapping + curr_prod->page_offset; + + rx_bd_prod->addr.hi = cpu_to_le32(upper_32_bits(new_mapping)); + rx_bd_prod->addr.lo = cpu_to_le32(lower_32_bits(new_mapping)); - rxq->sw_rx_cons++; rxq->sw_rx_prod++; + curr_cons->data = NULL; +} + +static inline int qede_realloc_rx_buffer(struct qede_dev *edev, + struct qede_rx_queue *rxq, + struct sw_rx_data *curr_cons) +{ + /* Move to the next segment in the page */ + curr_cons->page_offset += rxq->rx_buf_seg_size; + + if (curr_cons->page_offset == PAGE_SIZE) { + if (unlikely(qede_alloc_rx_buffer(edev, rxq))) + return -ENOMEM; + + dma_unmap_page(&edev->pdev->dev, curr_cons->mapping, + PAGE_SIZE, DMA_FROM_DEVICE); + } else { + /* Increment refcount of the page as we don't want + * network stack to take the ownership of the page + * which can be recycled multiple times by the driver. 
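qede_realloc_rx_buffer(), whose body completes just below, advances page_offset by one segment each time a buffer is consumed; only when the offset reaches PAGE_SIZE does it post a fresh page and tear down the old mapping, otherwise it takes an extra reference on the page and reposts the same mapping. A condensed sketch of that decision, assuming hypothetical helpers in place of the qede ones:

#include <errno.h>

/* Illustrative stand-in for sw_rx_data and the qede helpers. */
struct rx_buf { unsigned int page_offset; /* page, mapping, ... */ };

extern int post_fresh_page(void);               /* ~ qede_alloc_rx_buffer() */
extern void unmap_whole_page(struct rx_buf *b); /* ~ dma_unmap_page()       */
extern void hold_and_repost(struct rx_buf *b);  /* ~ ref + qede_reuse_page()*/

static int recycle_or_refill(struct rx_buf *buf,
			     unsigned int seg_size, unsigned int page_size)
{
	buf->page_offset += seg_size;

	if (buf->page_offset == page_size) {
		/* Page fully consumed: post a replacement, drop the mapping. */
		if (post_fresh_page())
			return -ENOMEM;
		unmap_whole_page(buf);
	} else {
		/* Segments remain: keep the page alive and repost it. */
		hold_and_repost(buf);
	}
	return 0;
}
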
+ */ + atomic_inc(&curr_cons->data->_count); + qede_reuse_page(edev, rxq, curr_cons); + } + + return 0; } static inline void qede_update_rx_prod(struct qede_dev *edev, @@ -857,9 +886,10 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget) struct sw_rx_data *sw_rx_data; union eth_rx_cqe *cqe; struct sk_buff *skb; + struct page *data; + __le16 flags; u16 len, pad; u32 rx_hash; - u8 *data; /* Get the CQE from the completion ring */ cqe = (union eth_rx_cqe *) @@ -879,56 +909,110 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget) data = sw_rx_data->data; fp_cqe = &cqe->fast_path_regular; - len = le16_to_cpu(fp_cqe->pkt_len); + len = le16_to_cpu(fp_cqe->len_on_first_bd); pad = fp_cqe->placement_offset; + flags = cqe->fast_path_regular.pars_flags.flags; - /* For every Rx BD consumed, we allocate a new BD so the BD ring - * is always with a fixed size. If allocation fails, we take the - * consumed BD and return it to the ring in the PROD position. - * The packet that was received on that BD will be dropped (and - * not passed to the upper stack). - */ - if (likely(qede_alloc_rx_buffer(edev, rxq) == 0)) { - dma_unmap_single(&edev->pdev->dev, - dma_unmap_addr(sw_rx_data, mapping), - rxq->rx_buf_size, DMA_FROM_DEVICE); - - /* If this is an error packet then drop it */ - parse_flag = - le16_to_cpu(cqe->fast_path_regular.pars_flags.flags); - csum_flag = qede_check_csum(parse_flag); - if (csum_flag == QEDE_CSUM_ERROR) { - DP_NOTICE(edev, - "CQE in CONS = %u has error, flags = %x, dropping incoming packet\n", - sw_comp_cons, parse_flag); - rxq->rx_hw_errors++; - kfree(data); - goto next_rx; - } - - skb = build_skb(data, 0); - - if (unlikely(!skb)) { - DP_NOTICE(edev, - "Build_skb failed, dropping incoming packet\n"); - kfree(data); - rxq->rx_alloc_errors++; - goto next_rx; - } + /* If this is an error packet then drop it */ + parse_flag = le16_to_cpu(flags); - skb_reserve(skb, pad); + csum_flag = qede_check_csum(parse_flag); + if (unlikely(csum_flag == QEDE_CSUM_ERROR)) { + DP_NOTICE(edev, + "CQE in CONS = %u has error, flags = %x, dropping incoming packet\n", + sw_comp_cons, parse_flag); + rxq->rx_hw_errors++; + qede_reuse_page(edev, rxq, sw_rx_data); + goto next_rx; + } - } else { + skb = netdev_alloc_skb(edev->ndev, QEDE_RX_HDR_SIZE); + if (unlikely(!skb)) { DP_NOTICE(edev, - "New buffer allocation failed, dropping incoming packet and reusing its buffer\n"); - qede_reuse_rx_data(rxq); + "Build_skb failed, dropping incoming packet\n"); + qede_reuse_page(edev, rxq, sw_rx_data); rxq->rx_alloc_errors++; - goto next_cqe; + goto next_rx; + } + + /* Copy data into SKB */ + if (len + pad <= QEDE_RX_HDR_SIZE) { + memcpy(skb_put(skb, len), + page_address(data) + pad + + sw_rx_data->page_offset, len); + qede_reuse_page(edev, rxq, sw_rx_data); + } else { + struct skb_frag_struct *frag; + unsigned int pull_len; + unsigned char *va; + + frag = &skb_shinfo(skb)->frags[0]; + + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, data, + pad + sw_rx_data->page_offset, + len, rxq->rx_buf_seg_size); + + va = skb_frag_address(frag); + pull_len = eth_get_headlen(va, QEDE_RX_HDR_SIZE); + + /* Align the pull_len to optimize memcpy */ + memcpy(skb->data, va, ALIGN(pull_len, sizeof(long))); + + skb_frag_size_sub(frag, pull_len); + frag->page_offset += pull_len; + skb->data_len -= pull_len; + skb->tail += pull_len; + + if (unlikely(qede_realloc_rx_buffer(edev, rxq, + sw_rx_data))) { + DP_ERR(edev, "Failed to allocate rx buffer\n"); + rxq->rx_alloc_errors++; + goto next_cqe; + } } - sw_rx_data->data = NULL; + 
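The receive path above now allocates only a small QEDE_RX_HDR_SIZE skb: frames that fit are copied outright so the page can be recycled immediately, while larger frames attach the page segment as a fragment and pull just the protocol headers into the linear area. A sketch of that copy-break decision; hdr_room stands in for QEDE_RX_HDR_SIZE, reuse_page() for the recycle helper, and the skb calls are the same kernel APIs the hunk uses:

if (len + pad <= hdr_room) {
	/* Copy-break: take a private copy and recycle the page at once. */
	memcpy(skb_put(skb, len), page_address(page) + pad + offset, len);
	reuse_page(rxq, buf);
} else {
	/* Attach the page segment as fragment 0... */
	skb_add_rx_frag(skb, 0, page, pad + offset, len, seg_size);
	/* ...then pull only the protocol headers into the linear area. */
	pull_len = eth_get_headlen(skb_frag_address(&skb_shinfo(skb)->frags[0]),
				   hdr_room);
}
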
if (fp_cqe->bd_num != 1) { + u16 pkt_len = le16_to_cpu(fp_cqe->pkt_len); + u8 num_frags; + + pkt_len -= len; + + for (num_frags = fp_cqe->bd_num - 1; num_frags > 0; + num_frags--) { + u16 cur_size = pkt_len > rxq->rx_buf_size ? + rxq->rx_buf_size : pkt_len; + + WARN_ONCE(!cur_size, + "Still got %d BDs for mapping jumbo, but length became 0\n", + num_frags); + + if (unlikely(qede_alloc_rx_buffer(edev, rxq))) + goto next_cqe; + + rxq->sw_rx_cons++; + sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX; + sw_rx_data = &rxq->sw_rx_ring[sw_rx_index]; + qed_chain_consume(&rxq->rx_bd_ring); + dma_unmap_page(&edev->pdev->dev, + sw_rx_data->mapping, + PAGE_SIZE, DMA_FROM_DEVICE); + + skb_fill_page_desc(skb, + skb_shinfo(skb)->nr_frags++, + sw_rx_data->data, 0, + cur_size); + + skb->truesize += PAGE_SIZE; + skb->data_len += cur_size; + skb->len += cur_size; + pkt_len -= cur_size; + } - skb_put(skb, len); + if (pkt_len) + DP_ERR(edev, + "Mapped all BDs of jumbo, but still have %d bytes\n", + pkt_len); + } skb->protocol = eth_type_trans(skb, edev->ndev); @@ -1056,6 +1140,21 @@ static int qede_set_ucast_rx_mac(struct qede_dev *edev, return edev->ops->filter_config(edev->cdev, &filter_cmd); } +static int qede_set_ucast_rx_vlan(struct qede_dev *edev, + enum qed_filter_xcast_params_type opcode, + u16 vid) +{ + struct qed_filter_params filter_cmd; + + memset(&filter_cmd, 0, sizeof(filter_cmd)); + filter_cmd.type = QED_FILTER_TYPE_UCAST; + filter_cmd.filter.ucast.type = opcode; + filter_cmd.filter.ucast.vlan_valid = 1; + filter_cmd.filter.ucast.vlan = vid; + + return edev->ops->filter_config(edev->cdev, &filter_cmd); +} + void qede_fill_by_demand_stats(struct qede_dev *edev) { struct qed_eth_stats stats; @@ -1168,6 +1267,247 @@ static struct rtnl_link_stats64 *qede_get_stats64( return stats; } +static void qede_config_accept_any_vlan(struct qede_dev *edev, bool action) +{ + struct qed_update_vport_params params; + int rc; + + /* Proceed only if action actually needs to be performed */ + if (edev->accept_any_vlan == action) + return; + + memset(¶ms, 0, sizeof(params)); + + params.vport_id = 0; + params.accept_any_vlan = action; + params.update_accept_any_vlan_flg = 1; + + rc = edev->ops->vport_update(edev->cdev, ¶ms); + if (rc) { + DP_ERR(edev, "Failed to %s accept-any-vlan\n", + action ? "enable" : "disable"); + } else { + DP_INFO(edev, "%s accept-any-vlan\n", + action ? "enabled" : "disabled"); + edev->accept_any_vlan = action; + } +} + +static int qede_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) +{ + struct qede_dev *edev = netdev_priv(dev); + struct qede_vlan *vlan, *tmp; + int rc; + + DP_VERBOSE(edev, NETIF_MSG_IFUP, "Adding vlan 0x%04x\n", vid); + + vlan = kzalloc(sizeof(*vlan), GFP_KERNEL); + if (!vlan) { + DP_INFO(edev, "Failed to allocate struct for vlan\n"); + return -ENOMEM; + } + INIT_LIST_HEAD(&vlan->list); + vlan->vid = vid; + vlan->configured = false; + + /* Verify vlan isn't already configured */ + list_for_each_entry(tmp, &edev->vlan_list, list) { + if (tmp->vid == vlan->vid) { + DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN), + "vlan already configured\n"); + kfree(vlan); + return -EEXIST; + } + } + + /* If interface is down, cache this VLAN ID and return */ + if (edev->state != QEDE_STATE_OPEN) { + DP_VERBOSE(edev, NETIF_MSG_IFDOWN, + "Interface is down, VLAN %d will be configured when interface is up\n", + vid); + if (vid != 0) + edev->non_configured_vlans++; + list_add(&vlan->list, &edev->vlan_list); + + return 0; + } + + /* Check for the filter limit. 
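The filter-limit check that follows decides between programming a hardware VLAN filter and falling back to accept-any-VLAN mode. The rule reduces to a small predicate, shown here with a worked example; field names mirror the hunk but the function itself is illustrative:

/* 'quota' is dev_info.num_vlan_filters; vid 0 has a reserved filter
 * and never consumes a credit.
 */
static bool can_use_hw_filter(u16 vid, u16 configured, u16 quota)
{
	return vid == 0 || configured < quota;
}

/* Example with quota = 2: vid 0 -> HW (reserved); vids 10 and 20 -> HW
 * (both credits now used); vid 30 -> no credit left, so the driver
 * enables accept-any-VLAN instead and counts it as non-configured.
 */
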
+ * Note - vlan0 has a reserved filter and can be added without + * worrying about quota + */ + if ((edev->configured_vlans < edev->dev_info.num_vlan_filters) || + (vlan->vid == 0)) { + rc = qede_set_ucast_rx_vlan(edev, + QED_FILTER_XCAST_TYPE_ADD, + vlan->vid); + if (rc) { + DP_ERR(edev, "Failed to configure VLAN %d\n", + vlan->vid); + kfree(vlan); + return -EINVAL; + } + vlan->configured = true; + + /* vlan0 filter isn't consuming out of our quota */ + if (vlan->vid != 0) + edev->configured_vlans++; + } else { + /* Out of quota; Activate accept-any-VLAN mode */ + if (!edev->non_configured_vlans) + qede_config_accept_any_vlan(edev, true); + + edev->non_configured_vlans++; + } + + list_add(&vlan->list, &edev->vlan_list); + + return 0; +} + +static void qede_del_vlan_from_list(struct qede_dev *edev, + struct qede_vlan *vlan) +{ + /* vlan0 filter isn't consuming out of our quota */ + if (vlan->vid != 0) { + if (vlan->configured) + edev->configured_vlans--; + else + edev->non_configured_vlans--; + } + + list_del(&vlan->list); + kfree(vlan); +} + +static int qede_configure_vlan_filters(struct qede_dev *edev) +{ + int rc = 0, real_rc = 0, accept_any_vlan = 0; + struct qed_dev_eth_info *dev_info; + struct qede_vlan *vlan = NULL; + + if (list_empty(&edev->vlan_list)) + return 0; + + dev_info = &edev->dev_info; + + /* Configure non-configured vlans */ + list_for_each_entry(vlan, &edev->vlan_list, list) { + if (vlan->configured) + continue; + + /* We have used all our credits, now enable accept_any_vlan */ + if ((vlan->vid != 0) && + (edev->configured_vlans == dev_info->num_vlan_filters)) { + accept_any_vlan = 1; + continue; + } + + DP_VERBOSE(edev, NETIF_MSG_IFUP, "Adding vlan %d\n", vlan->vid); + + rc = qede_set_ucast_rx_vlan(edev, QED_FILTER_XCAST_TYPE_ADD, + vlan->vid); + if (rc) { + DP_ERR(edev, "Failed to configure VLAN %u\n", + vlan->vid); + real_rc = rc; + continue; + } + + vlan->configured = true; + /* vlan0 filter doesn't consume our VLAN filter's quota */ + if (vlan->vid != 0) { + edev->non_configured_vlans--; + edev->configured_vlans++; + } + } + + /* enable accept_any_vlan mode if we have more VLANs than credits, + * or remove accept_any_vlan mode if we've actually removed + * a non-configured vlan, and all remaining vlans are truly configured. + */ + + if (accept_any_vlan) + qede_config_accept_any_vlan(edev, true); + else if (!edev->non_configured_vlans) + qede_config_accept_any_vlan(edev, false); + + return real_rc; +} + +static int qede_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid) +{ + struct qede_dev *edev = netdev_priv(dev); + struct qede_vlan *vlan = NULL; + int rc; + + DP_VERBOSE(edev, NETIF_MSG_IFDOWN, "Removing vlan 0x%04x\n", vid); + + /* Find whether entry exists */ + list_for_each_entry(vlan, &edev->vlan_list, list) + if (vlan->vid == vid) + break; + + if (!vlan || (vlan->vid != vid)) { + DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN), + "Vlan isn't configured\n"); + return 0; + } + + if (edev->state != QEDE_STATE_OPEN) { + /* As interface is already down, we don't have a VPORT + * instance to remove vlan filter. 
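qede_configure_vlan_filters() above ends by reconciling the accept-any-VLAN state with the credit situation; after a removal frees a filter, a cached VLAN may get programmed and the promiscuous VLAN mode dropped. The closing policy, in sketch form:

/* Post-walk policy (sketch; the counters mirror edev->configured_vlans
 * and edev->non_configured_vlans).
 */
if (out_of_credits)
	config_accept_any_vlan(edev, true);   /* still over quota */
else if (!non_configured)
	config_accept_any_vlan(edev, false);  /* everything fits in HW now */
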
So just update vlan list + */ + DP_VERBOSE(edev, NETIF_MSG_IFDOWN, + "Interface is down, removing VLAN from list only\n"); + qede_del_vlan_from_list(edev, vlan); + return 0; + } + + /* Remove vlan */ + rc = qede_set_ucast_rx_vlan(edev, QED_FILTER_XCAST_TYPE_DEL, vid); + if (rc) { + DP_ERR(edev, "Failed to remove VLAN %d\n", vid); + return -EINVAL; + } + + qede_del_vlan_from_list(edev, vlan); + + /* We have removed a VLAN - try to see if we can + * configure non-configured VLAN from the list. + */ + rc = qede_configure_vlan_filters(edev); + + return rc; +} + +static void qede_vlan_mark_nonconfigured(struct qede_dev *edev) +{ + struct qede_vlan *vlan = NULL; + + if (list_empty(&edev->vlan_list)) + return; + + list_for_each_entry(vlan, &edev->vlan_list, list) { + if (!vlan->configured) + continue; + + vlan->configured = false; + + /* vlan0 filter isn't consuming out of our quota */ + if (vlan->vid != 0) { + edev->non_configured_vlans++; + edev->configured_vlans--; + } + + DP_VERBOSE(edev, NETIF_MSG_IFDOWN, + "marked vlan %d as non-configured\n", + vlan->vid); + } + + edev->accept_any_vlan = false; +} + static const struct net_device_ops qede_netdev_ops = { .ndo_open = qede_open, .ndo_stop = qede_close, @@ -1176,6 +1516,8 @@ static const struct net_device_ops qede_netdev_ops = { .ndo_set_mac_address = qede_set_mac_addr, .ndo_validate_addr = eth_validate_addr, .ndo_change_mtu = qede_change_mtu, + .ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid, .ndo_get_stats64 = qede_get_stats64, }; @@ -1220,6 +1562,8 @@ static struct qede_dev *qede_alloc_etherdev(struct qed_dev *cdev, edev->num_tc = edev->dev_info.num_tc; + INIT_LIST_HEAD(&edev->vlan_list); + return edev; } @@ -1251,7 +1595,7 @@ static void qede_init_ndev(struct qede_dev *edev) NETIF_F_HIGHDMA; ndev->features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HIGHDMA | - NETIF_F_HW_VLAN_CTAG_TX; + NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_TX; ndev->hw_features = hw_features; @@ -1566,17 +1910,17 @@ static void qede_free_rx_buffers(struct qede_dev *edev, for (i = rxq->sw_rx_cons; i != rxq->sw_rx_prod; i++) { struct sw_rx_data *rx_buf; - u8 *data; + struct page *data; rx_buf = &rxq->sw_rx_ring[i & NUM_RX_BDS_MAX]; data = rx_buf->data; - dma_unmap_single(&edev->pdev->dev, - dma_unmap_addr(rx_buf, mapping), - rxq->rx_buf_size, DMA_FROM_DEVICE); + dma_unmap_page(&edev->pdev->dev, + rx_buf->mapping, + PAGE_SIZE, DMA_FROM_DEVICE); rx_buf->data = NULL; - kfree(data); + __free_page(data); } } @@ -1600,29 +1944,32 @@ static int qede_alloc_rx_buffer(struct qede_dev *edev, struct sw_rx_data *sw_rx_data; struct eth_rx_bd *rx_bd; dma_addr_t mapping; + struct page *data; u16 rx_buf_size; - u8 *data; rx_buf_size = rxq->rx_buf_size; - data = kmalloc(rx_buf_size, GFP_ATOMIC); + data = alloc_pages(GFP_ATOMIC, 0); if (unlikely(!data)) { - DP_NOTICE(edev, "Failed to allocate Rx data\n"); + DP_NOTICE(edev, "Failed to allocate Rx data [page]\n"); return -ENOMEM; } - mapping = dma_map_single(&edev->pdev->dev, data, - rx_buf_size, DMA_FROM_DEVICE); + /* Map the entire page as it would be used + * for multiple RX buffer segment size mapping. 
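Advertising NETIF_F_HW_VLAN_CTAG_FILTER above is what makes the stack route per-VID add/remove requests through the new ndo hooks in the first place; without that feature bit the kernel assumes the device accepts all VLANs and does not consult the driver. The wiring reduces to (a sketch of the two hunks above):

static const struct net_device_ops qede_netdev_ops = {
	/* ... */
	.ndo_vlan_rx_add_vid  = qede_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
};

/* The stack now calls the hooks for every VID it wants filtered. */
ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
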
+ */ mapping = dma_map_page(&edev->pdev->dev, data, 0, + PAGE_SIZE, DMA_FROM_DEVICE); if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) { - kfree(data); + __free_page(data); DP_NOTICE(edev, "Failed to map Rx buffer\n"); return -ENOMEM; } sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX]; + sw_rx_data->page_offset = 0; sw_rx_data->data = data; - - dma_unmap_addr_set(sw_rx_data, mapping, mapping); + sw_rx_data->mapping = mapping; /* Advance PROD and get BD pointer */ rx_bd = (struct eth_rx_bd *)qed_chain_produce(&rxq->rx_bd_ring); @@ -1643,13 +1990,16 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev, rxq->num_rx_buffers = edev->q_num_rx_buffers; - rxq->rx_buf_size = NET_IP_ALIGN + - ETH_OVERHEAD + - edev->ndev->mtu + - QEDE_FW_RX_ALIGN_END; + rxq->rx_buf_size = NET_IP_ALIGN + ETH_OVERHEAD + + edev->ndev->mtu; + if (rxq->rx_buf_size > PAGE_SIZE) + rxq->rx_buf_size = PAGE_SIZE; + + /* Segment size to split a page in multiple equal parts */ + rxq->rx_buf_seg_size = roundup_pow_of_two(rxq->rx_buf_size); /* Allocate the parallel driver ring for Rx buffers */ - size = sizeof(*rxq->sw_rx_ring) * NUM_RX_BDS_MAX; + size = sizeof(*rxq->sw_rx_ring) * RX_RING_SIZE; rxq->sw_rx_ring = kzalloc(size, GFP_KERNEL); if (!rxq->sw_rx_ring) { DP_ERR(edev, "Rx buffers ring allocation failed\n"); @@ -1660,7 +2010,7 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev, rc = edev->ops->common->chain_alloc(edev->cdev, QED_CHAIN_USE_TO_CONSUME_PRODUCE, QED_CHAIN_MODE_NEXT_PTR, - NUM_RX_BDS_MAX, + RX_RING_SIZE, sizeof(struct eth_rx_bd), &rxq->rx_bd_ring); @@ -1671,7 +2021,7 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev, rc = edev->ops->common->chain_alloc(edev->cdev, QED_CHAIN_USE_TO_CONSUME, QED_CHAIN_MODE_PBL, - NUM_RX_BDS_MAX, + RX_RING_SIZE, sizeof(union eth_rx_cqe), &rxq->rx_comp_ring); if (rc) @@ -2252,6 +2602,7 @@ static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode) DP_INFO(edev, "Stopped Queues\n"); + qede_vlan_mark_nonconfigured(edev); edev->ops->fastpath_stop(edev->cdev); /* Release the interrupts */ @@ -2320,6 +2671,9 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode) edev->state = QEDE_STATE_OPEN; mutex_unlock(&edev->qede_lock); + /* Program un-configured VLANs */ + qede_configure_vlan_filters(edev); + /* Ask for link-up using current configuration */ memset(&link_params, 0, sizeof(link_params)); link_params.link_up = true; @@ -2580,6 +2934,17 @@ static void qede_config_rx_mode(struct net_device *ndev) goto out; } + /* take care of VLAN mode */ + if (ndev->flags & IFF_PROMISC) { + qede_config_accept_any_vlan(edev, true); + } else if (!edev->non_configured_vlans) { + /* It's possible that accept_any_vlan mode is set due to a + * previous setting of IFF_PROMISC. If vlan credits are + * sufficient, disable accept_any_vlan. 
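The rx_buf_seg_size computation above (roundup_pow_of_two of the buffer size, capped at PAGE_SIZE) determines how many buffers each mapped page yields. A small, self-contained worked example; the overhead constant is illustrative, not the exact ETH_OVERHEAD:

#include <stdio.h>

static unsigned int roundup_pow2(unsigned int v)
{
	unsigned int p = 1;

	while (p < v)
		p <<= 1;
	return p;
}

int main(void)
{
	unsigned int page_size = 4096;      /* typical PAGE_SIZE */
	unsigned int mtu = 1500;
	unsigned int rx_buf = 2 + 18 + mtu; /* NET_IP_ALIGN + overhead + MTU */
	unsigned int seg = roundup_pow2(rx_buf);

	/* Prints: rx_buf=1520 seg=2048 buffers/page=2 */
	printf("rx_buf=%u seg=%u buffers/page=%u\n",
	       rx_buf, seg, page_size / seg);
	return 0;
}
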
+ */ + qede_config_accept_any_vlan(edev, false); + } + rx_mode.filter.accept_flags = accept_flags; edev->ops->filter_config(edev->cdev, &rx_mode); out: diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c index a5f422f26cb4..daf05155b732 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c @@ -772,8 +772,10 @@ int qlcnic_82xx_config_intrpt(struct qlcnic_adapter *adapter, u8 op_type) int i, err = 0; for (i = 0; i < ahw->num_msix; i++) { - qlcnic_alloc_mbx_args(&cmd, adapter, - QLCNIC_CMD_MQ_TX_CONFIG_INTR); + err = qlcnic_alloc_mbx_args(&cmd, adapter, + QLCNIC_CMD_MQ_TX_CONFIG_INTR); + if (err) + return err; type = op_type ? QLCNIC_INTRPT_ADD : QLCNIC_INTRPT_DEL; val = type | (ahw->intr_tbl[i].type << 4); if (ahw->intr_tbl[i].type == QLCNIC_INTRPT_MSIX) diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c index 9a37247cf4b8..6b541e57c96a 100644 --- a/drivers/net/ethernet/rdc/r6040.c +++ b/drivers/net/ethernet/rdc/r6040.c @@ -1039,7 +1039,7 @@ static int r6040_mii_probe(struct net_device *dev) return -ENODEV; } - phydev = phy_connect(dev, dev_name(&phydev->dev), &r6040_adjust_link, + phydev = phy_connect(dev, phydev_name(phydev), &r6040_adjust_link, PHY_INTERFACE_MODE_MII); if (IS_ERR(phydev)) { @@ -1061,9 +1061,7 @@ static int r6040_mii_probe(struct net_device *dev) lp->old_link = 0; lp->old_duplex = -1; - dev_info(&lp->pdev->dev, "attached PHY driver [%s] " - "(mii_bus:phy_addr=%s)\n", - phydev->drv->name, dev_name(&phydev->dev)); + phy_attached_info(phydev); return 0; } @@ -1077,7 +1075,6 @@ static int r6040_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) static int card_idx = -1; int bar = 0; u16 *adrp; - int i; pr_info("%s\n", version); @@ -1189,19 +1186,11 @@ static int r6040_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) lp->mii_bus->name = "r6040_eth_mii"; snprintf(lp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", dev_name(&pdev->dev), card_idx); - lp->mii_bus->irq = kmalloc_array(PHY_MAX_ADDR, sizeof(int), GFP_KERNEL); - if (!lp->mii_bus->irq) { - err = -ENOMEM; - goto err_out_mdio; - } - - for (i = 0; i < PHY_MAX_ADDR; i++) - lp->mii_bus->irq[i] = PHY_POLL; err = mdiobus_register(lp->mii_bus); if (err) { dev_err(&pdev->dev, "failed to register MII bus\n"); - goto err_out_mdio_irq; + goto err_out_mdio; } err = r6040_mii_probe(dev); @@ -1220,8 +1209,6 @@ static int r6040_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) err_out_mdio_unregister: mdiobus_unregister(lp->mii_bus); -err_out_mdio_irq: - kfree(lp->mii_bus->irq); err_out_mdio: mdiobus_free(lp->mii_bus); err_out_unmap: @@ -1244,7 +1231,6 @@ static void r6040_remove_one(struct pci_dev *pdev) unregister_netdev(dev); mdiobus_unregister(lp->mii_bus); - kfree(lp->mii_bus->irq); mdiobus_free(lp->mii_bus); netif_napi_del(&lp->napi); pci_iounmap(pdev, lp->base); diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h index 9fbe92ac225b..b2160d1b9c71 100644 --- a/drivers/net/ethernet/renesas/ravb.h +++ b/drivers/net/ethernet/renesas/ravb.h @@ -2,7 +2,7 @@ * * Copyright (C) 2014-2015 Renesas Electronics Corporation * Copyright (C) 2015 Renesas Solutions Corp. - * Copyright (C) 2015 Cogent Embedded, Inc. <source@cogentembedded.com> + * Copyright (C) 2015-2016 Cogent Embedded, Inc. 
<source@cogentembedded.com> * * Based on the SuperH Ethernet driver * @@ -837,6 +837,8 @@ static inline void ravb_write(struct net_device *ndev, u32 data, iowrite32(data, priv->addr + reg); } +void ravb_modify(struct net_device *ndev, enum ravb_reg reg, u32 clear, + u32 set); int ravb_wait(struct net_device *ndev, enum ravb_reg reg, u32 mask, u32 value); irqreturn_t ravb_ptp_interrupt(struct net_device *ndev); diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index 0e1ebb39ab46..331c5969dca4 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -2,7 +2,7 @@ * * Copyright (C) 2014-2015 Renesas Electronics Corporation * Copyright (C) 2015 Renesas Solutions Corp. - * Copyright (C) 2015 Cogent Embedded, Inc. <source@cogentembedded.com> + * Copyright (C) 2015-2016 Cogent Embedded, Inc. <source@cogentembedded.com> * * Based on the SuperH Ethernet driver * @@ -42,6 +42,12 @@ NETIF_MSG_RX_ERR | \ NETIF_MSG_TX_ERR) +void ravb_modify(struct net_device *ndev, enum ravb_reg reg, u32 clear, + u32 set) +{ + ravb_write(ndev, (ravb_read(ndev, reg) & ~clear) | set, reg); +} + int ravb_wait(struct net_device *ndev, enum ravb_reg reg, u32 mask, u32 value) { int i; @@ -59,8 +65,7 @@ static int ravb_config(struct net_device *ndev) int error; /* Set config mode */ - ravb_write(ndev, (ravb_read(ndev, CCC) & ~CCC_OPC) | CCC_OPC_CONFIG, - CCC); + ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG); /* Check if the operating mode is changed to the config mode */ error = ravb_wait(ndev, CSR, CSR_OPS, CSR_OPS_CONFIG); if (error) @@ -72,13 +77,8 @@ static int ravb_config(struct net_device *ndev) static void ravb_set_duplex(struct net_device *ndev) { struct ravb_private *priv = netdev_priv(ndev); - u32 ecmr = ravb_read(ndev, ECMR); - if (priv->duplex) /* Full */ - ecmr |= ECMR_DM; - else /* Half */ - ecmr &= ~ECMR_DM; - ravb_write(ndev, ecmr, ECMR); + ravb_modify(ndev, ECMR, ECMR_DM, priv->duplex ? ECMR_DM : 0); } static void ravb_set_rate(struct net_device *ndev) @@ -92,8 +92,6 @@ static void ravb_set_rate(struct net_device *ndev) case 1000: /* 1000BASE */ ravb_write(ndev, GECMR_SPEED_1000, GECMR); break; - default: - break; } } @@ -131,13 +129,8 @@ static void ravb_mdio_ctrl(struct mdiobb_ctrl *ctrl, u32 mask, int set) { struct ravb_private *priv = container_of(ctrl, struct ravb_private, mdiobb); - u32 pir = ravb_read(priv->ndev, PIR); - if (set) - pir |= mask; - else - pir &= ~mask; - ravb_write(priv->ndev, pir, PIR); + ravb_modify(priv->ndev, PIR, mask, set ? mask : 0); } /* MDC pin control */ @@ -343,16 +336,13 @@ error: static void ravb_emac_init(struct net_device *ndev) { struct ravb_private *priv = netdev_priv(ndev); - u32 ecmr; /* Receive frame limit set register */ ravb_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN, RFLR); /* PAUSE prohibition */ - ecmr = ravb_read(ndev, ECMR); - ecmr &= ECMR_DM; - ecmr |= ECMR_ZPF | (priv->duplex ? ECMR_DM : 0) | ECMR_TE | ECMR_RE; - ravb_write(ndev, ecmr, ECMR); + ravb_write(ndev, ECMR_ZPF | (priv->duplex ? 
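
The ravb_modify() helper introduced above makes these conversions purely mechanical: every call is shorthand for the open-coded read-modify-write sequence it replaces. Taking the ravb_config() hunk as the worked example:

	/* before */
	ravb_write(ndev, (ravb_read(ndev, CCC) & ~CCC_OPC) | CCC_OPC_CONFIG, CCC);
	/* after: clear the CCC_OPC field, then set CCC_OPC_CONFIG */
	ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG);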
ECMR_DM : 0) | + ECMR_TE | ECMR_RE, ECMR); ravb_set_rate(ndev); @@ -396,9 +386,9 @@ static int ravb_dmac_init(struct net_device *ndev) ravb_ring_format(ndev, RAVB_NC); #if defined(__LITTLE_ENDIAN) - ravb_write(ndev, ravb_read(ndev, CCC) & ~CCC_BOC, CCC); + ravb_modify(ndev, CCC, CCC_BOC, 0); #else - ravb_write(ndev, ravb_read(ndev, CCC) | CCC_BOC, CCC); + ravb_modify(ndev, CCC, CCC_BOC, CCC_BOC); #endif /* Set AVB RX */ @@ -421,8 +411,7 @@ static int ravb_dmac_init(struct net_device *ndev) ravb_write(ndev, TIC_FTE0 | TIC_FTE1 | TIC_TFUE, TIC); /* Setting the control will start the AVB-DMAC process. */ - ravb_write(ndev, (ravb_read(ndev, CCC) & ~CCC_OPC) | CCC_OPC_OPERATION, - CCC); + ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_OPERATION); return 0; } @@ -496,7 +485,7 @@ static void ravb_get_tx_tstamp(struct net_device *ndev) break; } } - ravb_write(ndev, ravb_read(ndev, TCCR) | TCCR_TFR, TCCR); + ravb_modify(ndev, TCCR, TCCR_TFR, TCCR_TFR); } } @@ -616,13 +605,13 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q) static void ravb_rcv_snd_disable(struct net_device *ndev) { /* Disable TX and RX */ - ravb_write(ndev, ravb_read(ndev, ECMR) & ~(ECMR_RE | ECMR_TE), ECMR); + ravb_modify(ndev, ECMR, ECMR_RE | ECMR_TE, 0); } static void ravb_rcv_snd_enable(struct net_device *ndev) { /* Enable TX and RX */ - ravb_write(ndev, ravb_read(ndev, ECMR) | ECMR_RE | ECMR_TE, ECMR); + ravb_modify(ndev, ECMR, ECMR_RE | ECMR_TE, ECMR_RE | ECMR_TE); } /* function for waiting dma process finished */ @@ -815,8 +804,8 @@ static int ravb_poll(struct napi_struct *napi, int budget) /* Re-enable RX/TX interrupts */ spin_lock_irqsave(&priv->lock, flags); - ravb_write(ndev, ravb_read(ndev, RIC0) | mask, RIC0); - ravb_write(ndev, ravb_read(ndev, TIC) | mask, TIC); + ravb_modify(ndev, RIC0, mask, mask); + ravb_modify(ndev, TIC, mask, mask); mmiowb(); spin_unlock_irqrestore(&priv->lock, flags); @@ -855,8 +844,7 @@ static void ravb_adjust_link(struct net_device *ndev) ravb_set_rate(ndev); } if (!priv->link) { - ravb_write(ndev, ravb_read(ndev, ECMR) & ~ECMR_TXF, - ECMR); + ravb_modify(ndev, ECMR, ECMR_TXF, 0); new_state = true; priv->link = phydev->link; if (priv->no_avb_link) @@ -927,8 +915,7 @@ static int ravb_phy_init(struct net_device *ndev) /* 10BASE is not supported */ phydev->supported &= ~PHY_10BT_FEATURES; - netdev_info(ndev, "attached PHY %d (IRQ %d) to driver %s\n", - phydev->addr, phydev->irq, phydev->drv->name); + phy_attached_info(phydev); priv->phydev = phydev; @@ -1397,7 +1384,7 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev) desc--; desc->die_dt = DT_FSTART; - ravb_write(ndev, ravb_read(ndev, TCCR) | (TCCR_TSRQ0 << q), TCCR); + ravb_modify(ndev, TCCR, TCCR_TSRQ0 << q, TCCR_TSRQ0 << q); priv->cur_tx[q] += NUM_TX_DESC; if (priv->cur_tx[q] - priv->dirty_tx[q] > @@ -1472,15 +1459,10 @@ static void ravb_set_rx_mode(struct net_device *ndev) { struct ravb_private *priv = netdev_priv(ndev); unsigned long flags; - u32 ecmr; spin_lock_irqsave(&priv->lock, flags); - ecmr = ravb_read(ndev, ECMR); - if (ndev->flags & IFF_PROMISC) - ecmr |= ECMR_PRM; - else - ecmr &= ~ECMR_PRM; - ravb_write(ndev, ecmr, ECMR); + ravb_modify(ndev, ECMR, ECMR_PRM, + ndev->flags & IFF_PROMISC ? 
ECMR_PRM : 0); mmiowb(); spin_unlock_irqrestore(&priv->lock, flags); } @@ -1808,14 +1790,12 @@ static int ravb_probe(struct platform_device *pdev) /* Set AVB config mode */ if (chip_id == RCAR_GEN2) { - ravb_write(ndev, (ravb_read(ndev, CCC) & ~CCC_OPC) | - CCC_OPC_CONFIG, CCC); + ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG); /* Set CSEL value */ - ravb_write(ndev, (ravb_read(ndev, CCC) & ~CCC_CSEL) | - CCC_CSEL_HPB, CCC); + ravb_modify(ndev, CCC, CCC_CSEL, CCC_CSEL_HPB); } else { - ravb_write(ndev, (ravb_read(ndev, CCC) & ~CCC_OPC) | - CCC_OPC_CONFIG | CCC_GAC | CCC_CSEL_HPB, CCC); + ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG | + CCC_GAC | CCC_CSEL_HPB); } /* Set CSEL value */ @@ -1828,7 +1808,7 @@ static int ravb_probe(struct platform_device *pdev) goto out_release; /* Request GTI loading */ - ravb_write(ndev, ravb_read(ndev, GCCR) | GCCR_LTI, GCCR); + ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI); /* Allocate descriptor base address table */ priv->desc_bat_size = sizeof(struct ravb_desc) * DBAT_ENTRY_NUM; diff --git a/drivers/net/ethernet/renesas/ravb_ptp.c b/drivers/net/ethernet/renesas/ravb_ptp.c index 7a8ce920c49e..57992ccc4657 100644 --- a/drivers/net/ethernet/renesas/ravb_ptp.c +++ b/drivers/net/ethernet/renesas/ravb_ptp.c @@ -2,7 +2,7 @@ * * Copyright (C) 2013-2015 Renesas Electronics Corporation * Copyright (C) 2015 Renesas Solutions Corp. - * Copyright (C) 2015 Cogent Embedded, Inc. <source@cogentembedded.com> + * Copyright (C) 2015-2016 Cogent Embedded, Inc. <source@cogentembedded.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -21,7 +21,7 @@ static int ravb_ptp_tcr_request(struct ravb_private *priv, u32 request) if (error) return error; - ravb_write(ndev, ravb_read(ndev, GCCR) | request, GCCR); + ravb_modify(ndev, GCCR, request, request); return ravb_wait(ndev, GCCR, GCCR_TCR, GCCR_TCR_NOREQ); } @@ -185,7 +185,6 @@ static int ravb_ptp_extts(struct ptp_clock_info *ptp, ptp.info); struct net_device *ndev = priv->ndev; unsigned long flags; - u32 gic; if (req->index) return -EINVAL; @@ -195,12 +194,7 @@ static int ravb_ptp_extts(struct ptp_clock_info *ptp, priv->ptp.extts[req->index] = on; spin_lock_irqsave(&priv->lock, flags); - gic = ravb_read(ndev, GIC); - if (on) - gic |= GIC_PTCE; - else - gic &= ~GIC_PTCE; - ravb_write(ndev, gic, GIC); + ravb_modify(ndev, GIC, GIC_PTCE, on ? 
GIC_PTCE : 0); mmiowb(); spin_unlock_irqrestore(&priv->lock, flags); @@ -216,7 +210,6 @@ static int ravb_ptp_perout(struct ptp_clock_info *ptp, struct ravb_ptp_perout *perout; unsigned long flags; int error = 0; - u32 gic; if (req->index) return -EINVAL; @@ -248,9 +241,7 @@ static int ravb_ptp_perout(struct ptp_clock_info *ptp, error = ravb_ptp_update_compare(priv, (u32)start_ns); if (!error) { /* Unmask interrupt */ - gic = ravb_read(ndev, GIC); - gic |= GIC_PTME; - ravb_write(ndev, gic, GIC); + ravb_modify(ndev, GIC, GIC_PTME, GIC_PTME); } } else { spin_lock_irqsave(&priv->lock, flags); @@ -259,9 +250,7 @@ static int ravb_ptp_perout(struct ptp_clock_info *ptp, perout->period = 0; /* Mask interrupt */ - gic = ravb_read(ndev, GIC); - gic &= ~GIC_PTME; - ravb_write(ndev, gic, GIC); + ravb_modify(ndev, GIC, GIC_PTME, 0); } mmiowb(); spin_unlock_irqrestore(&priv->lock, flags); @@ -331,7 +320,6 @@ void ravb_ptp_init(struct net_device *ndev, struct platform_device *pdev) { struct ravb_private *priv = netdev_priv(ndev); unsigned long flags; - u32 gccr; priv->ptp.info = ravb_ptp_info; @@ -340,8 +328,7 @@ void ravb_ptp_init(struct net_device *ndev, struct platform_device *pdev) spin_lock_irqsave(&priv->lock, flags); ravb_wait(ndev, GCCR, GCCR_TCR, GCCR_TCR_NOREQ); - gccr = ravb_read(ndev, GCCR) & ~GCCR_TCSS; - ravb_write(ndev, gccr | GCCR_TCSS_ADJGPTP, GCCR); + ravb_modify(ndev, GCCR, GCCR_TCSS, GCCR_TCSS_ADJGPTP); mmiowb(); spin_unlock_irqrestore(&priv->lock, flags); diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index baa81535a8fc..a2767336b7c5 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -3,7 +3,7 @@ * Copyright (C) 2014 Renesas Electronics Corporation * Copyright (C) 2006-2012 Nobuhiro Iwamatsu * Copyright (C) 2008-2014 Renesas Solutions Corp. - * Copyright (C) 2013-2014 Cogent Embedded, Inc. + * Copyright (C) 2013-2016 Cogent Embedded, Inc. * Copyright (C) 2014 Codethink Limited * * This program is free software; you can redistribute it and/or modify it @@ -428,6 +428,13 @@ static u32 sh_eth_read(struct net_device *ndev, int enum_index) return ioread32(mdp->addr + offset); } +static void sh_eth_modify(struct net_device *ndev, int enum_index, u32 clear, + u32 set) +{ + sh_eth_write(ndev, (sh_eth_read(ndev, enum_index) & ~clear) | set, + enum_index); +} + static bool sh_eth_is_gether(struct sh_eth_private *mdp) { return mdp->reg_offset == sh_eth_offset_gigabit; @@ -467,10 +474,7 @@ static void sh_eth_set_duplex(struct net_device *ndev) { struct sh_eth_private *mdp = netdev_priv(ndev); - if (mdp->duplex) /* Full */ - sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR); - else /* Half */ - sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR); + sh_eth_modify(ndev, ECMR, ECMR_DM, mdp->duplex ? 
ECMR_DM : 0); } static void sh_eth_chip_reset(struct net_device *ndev) @@ -496,8 +500,6 @@ static void sh_eth_set_rate_gether(struct net_device *ndev) case 1000: /* 1000BASE */ sh_eth_write(ndev, GECMR_1000, GECMR); break; - default: - break; } } @@ -583,12 +585,10 @@ static void sh_eth_set_rate_r8a777x(struct net_device *ndev) switch (mdp->speed) { case 10: /* 10BASE */ - sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_ELB, ECMR); + sh_eth_modify(ndev, ECMR, ECMR_ELB, 0); break; case 100:/* 100BASE */ - sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_ELB, ECMR); - break; - default: + sh_eth_modify(ndev, ECMR, ECMR_ELB, ECMR_ELB); break; } } @@ -649,12 +649,10 @@ static void sh_eth_set_rate_sh7724(struct net_device *ndev) switch (mdp->speed) { case 10: /* 10BASE */ - sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_RTM, ECMR); + sh_eth_modify(ndev, ECMR, ECMR_RTM, 0); break; case 100:/* 100BASE */ - sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_RTM, ECMR); - break; - default: + sh_eth_modify(ndev, ECMR, ECMR_RTM, ECMR_RTM); break; } } @@ -694,8 +692,6 @@ static void sh_eth_set_rate_sh7757(struct net_device *ndev) case 100:/* 100BASE */ sh_eth_write(ndev, 1, RTRATE); break; - default: - break; } } @@ -763,8 +759,6 @@ static void sh_eth_set_rate_giga(struct net_device *ndev) case 1000: /* 1000BASE */ sh_eth_write(ndev, 0x00000020, GECMR); break; - default: - break; } } @@ -924,8 +918,7 @@ static int sh_eth_reset(struct net_device *ndev) if (sh_eth_is_gether(mdp) || sh_eth_is_rz_fast_ether(mdp)) { sh_eth_write(ndev, EDSR_ENALL, EDSR); - sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER, - EDMR); + sh_eth_modify(ndev, EDMR, EDMR_SRST_GETHER, EDMR_SRST_GETHER); ret = sh_eth_check_reset(ndev); if (ret) @@ -949,11 +942,9 @@ static int sh_eth_reset(struct net_device *ndev) if (mdp->cd->select_mii) sh_eth_select_mii(ndev); } else { - sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_ETHER, - EDMR); + sh_eth_modify(ndev, EDMR, EDMR_SRST_ETHER, EDMR_SRST_ETHER); mdelay(3); - sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER, - EDMR); + sh_eth_modify(ndev, EDMR, EDMR_SRST_ETHER, 0); } return ret; @@ -1240,7 +1231,6 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start) { int ret = 0; struct sh_eth_private *mdp = netdev_priv(ndev); - u32 val; /* Soft Reset */ ret = sh_eth_reset(ndev); @@ -1286,17 +1276,15 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start) sh_eth_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN, RFLR); - sh_eth_write(ndev, sh_eth_read(ndev, EESR), EESR); + sh_eth_modify(ndev, EESR, 0, 0); if (start) { mdp->irq_enabled = true; sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR); } /* PAUSE Prohibition */ - val = (sh_eth_read(ndev, ECMR) & ECMR_DM) | - ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) | ECMR_TE | ECMR_RE; - - sh_eth_write(ndev, val, ECMR); + sh_eth_write(ndev, ECMR_ZPF | (mdp->duplex ? 
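
One sh_eth conversion above only looks like a no-op: by sh_eth_modify()'s definition, sh_eth_modify(ndev, EESR, 0, 0) expands to

	/* clear nothing, set nothing: read EESR and write the value straight back */
	sh_eth_write(ndev, sh_eth_read(ndev, EESR), EESR);

which, assuming EESR is a write-1-to-clear status register (as the original open-coded form suggests), acknowledges all currently pending status bits rather than doing nothing.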
ECMR_DM : 0) | + ECMR_TE | ECMR_RE, ECMR); if (mdp->cd->set_rate) mdp->cd->set_rate(ndev); @@ -1535,15 +1523,13 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota) static void sh_eth_rcv_snd_disable(struct net_device *ndev) { /* disable tx and rx */ - sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & - ~(ECMR_RE | ECMR_TE), ECMR); + sh_eth_modify(ndev, ECMR, ECMR_RE | ECMR_TE, 0); } static void sh_eth_rcv_snd_enable(struct net_device *ndev) { /* enable tx and rx */ - sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | - (ECMR_RE | ECMR_TE), ECMR); + sh_eth_modify(ndev, ECMR, ECMR_RE | ECMR_TE, ECMR_RE | ECMR_TE); } /* error control function */ @@ -1572,13 +1558,11 @@ static void sh_eth_error(struct net_device *ndev, u32 intr_status) sh_eth_rcv_snd_disable(ndev); } else { /* Link Up */ - sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) & - ~DMAC_M_ECI, EESIPR); + sh_eth_modify(ndev, EESIPR, DMAC_M_ECI, 0); /* clear int */ - sh_eth_write(ndev, sh_eth_read(ndev, ECSR), - ECSR); - sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) | - DMAC_M_ECI, EESIPR); + sh_eth_modify(ndev, ECSR, 0, 0); + sh_eth_modify(ndev, EESIPR, DMAC_M_ECI, + DMAC_M_ECI); /* enable tx and rx */ sh_eth_rcv_snd_enable(ndev); } @@ -1768,9 +1752,7 @@ static void sh_eth_adjust_link(struct net_device *ndev) mdp->cd->set_rate(ndev); } if (!mdp->link) { - sh_eth_write(ndev, - sh_eth_read(ndev, ECMR) & ~ECMR_TXF, - ECMR); + sh_eth_modify(ndev, ECMR, ECMR_TXF, 0); new_state = 1; mdp->link = phydev->link; if (mdp->cd->no_psr || mdp->no_ether_link) @@ -1826,8 +1808,7 @@ static int sh_eth_phy_init(struct net_device *ndev) return PTR_ERR(phydev); } - netdev_info(ndev, "attached PHY %d (IRQ %d) to driver %s\n", - phydev->addr, phydev->irq, phydev->drv->name); + phy_attached_info(phydev); mdp->phydev = phydev; @@ -2860,7 +2841,7 @@ static int sh_mdio_release(struct sh_eth_private *mdp) static int sh_mdio_init(struct sh_eth_private *mdp, struct sh_eth_plat_data *pd) { - int ret, i; + int ret; struct bb_info *bitbang; struct platform_device *pdev = mdp->pdev; struct device *dev = &mdp->pdev->dev; @@ -2886,20 +2867,10 @@ static int sh_mdio_init(struct sh_eth_private *mdp, snprintf(mdp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", pdev->name, pdev->id); - /* PHY IRQ */ - mdp->mii_bus->irq = devm_kmalloc_array(dev, PHY_MAX_ADDR, sizeof(int), - GFP_KERNEL); - if (!mdp->mii_bus->irq) { - ret = -ENOMEM; - goto out_free_bus; - } - /* register MDIO bus */ if (dev->of_node) { ret = of_mdiobus_register(mdp->mii_bus, dev->of_node); } else { - for (i = 0; i < PHY_MAX_ADDR; i++) - mdp->mii_bus->irq[i] = PHY_POLL; if (pd->phy_irq > 0) mdp->mii_bus->irq[pd->phy] = pd->phy_irq; @@ -2936,8 +2907,6 @@ static const u16 *sh_eth_get_register_offset(int register_type) case SH_ETH_REG_FAST_SH3_SH2: reg_offset = sh_eth_offset_fast_sh3_sh2; break; - default: - break; } return reg_offset; diff --git a/drivers/net/ethernet/rocker/Makefile b/drivers/net/ethernet/rocker/Makefile index f85fb12f36f1..faa36acee223 100644 --- a/drivers/net/ethernet/rocker/Makefile +++ b/drivers/net/ethernet/rocker/Makefile @@ -3,3 +3,4 @@ # obj-$(CONFIG_ROCKER) += rocker.o +rocker-y := rocker_main.o rocker_tlv.o rocker_ofdpa.o diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c deleted file mode 100644 index a4ab71d43e4e..000000000000 --- a/drivers/net/ethernet/rocker/rocker.c +++ /dev/null @@ -1,5493 +0,0 @@ -/* - * drivers/net/ethernet/rocker/rocker.c - Rocker switch device driver - * Copyright (c) 2014 Jiri Pirko <jiri@resnulli.us> - * Copyright (c) 
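
The wholesale deletion of rocker.c that begins here is not a removal of the driver: per the Makefile hunk above, the monolithic file is being split into rocker_main.c, rocker_tlv.c and rocker_ofdpa.c, where the code below presumably reappears in reorganized form.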
2014 Scott Feldman <sfeldma@gmail.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - */ - -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/pci.h> -#include <linux/interrupt.h> -#include <linux/sched.h> -#include <linux/wait.h> -#include <linux/spinlock.h> -#include <linux/hashtable.h> -#include <linux/crc32.h> -#include <linux/sort.h> -#include <linux/random.h> -#include <linux/netdevice.h> -#include <linux/inetdevice.h> -#include <linux/skbuff.h> -#include <linux/socket.h> -#include <linux/etherdevice.h> -#include <linux/ethtool.h> -#include <linux/if_ether.h> -#include <linux/if_vlan.h> -#include <linux/if_bridge.h> -#include <linux/bitops.h> -#include <linux/ctype.h> -#include <net/switchdev.h> -#include <net/rtnetlink.h> -#include <net/ip_fib.h> -#include <net/netevent.h> -#include <net/arp.h> -#include <linux/io-64-nonatomic-lo-hi.h> -#include <generated/utsrelease.h> - -#include "rocker.h" - -static const char rocker_driver_name[] = "rocker"; - -static const struct pci_device_id rocker_pci_id_table[] = { - {PCI_VDEVICE(REDHAT, PCI_DEVICE_ID_REDHAT_ROCKER), 0}, - {0, } -}; - -struct rocker_flow_tbl_key { - u32 priority; - enum rocker_of_dpa_table_id tbl_id; - union { - struct { - u32 in_pport; - u32 in_pport_mask; - enum rocker_of_dpa_table_id goto_tbl; - } ig_port; - struct { - u32 in_pport; - __be16 vlan_id; - __be16 vlan_id_mask; - enum rocker_of_dpa_table_id goto_tbl; - bool untagged; - __be16 new_vlan_id; - } vlan; - struct { - u32 in_pport; - u32 in_pport_mask; - __be16 eth_type; - u8 eth_dst[ETH_ALEN]; - u8 eth_dst_mask[ETH_ALEN]; - __be16 vlan_id; - __be16 vlan_id_mask; - enum rocker_of_dpa_table_id goto_tbl; - bool copy_to_cpu; - } term_mac; - struct { - __be16 eth_type; - __be32 dst4; - __be32 dst4_mask; - enum rocker_of_dpa_table_id goto_tbl; - u32 group_id; - } ucast_routing; - struct { - u8 eth_dst[ETH_ALEN]; - u8 eth_dst_mask[ETH_ALEN]; - int has_eth_dst; - int has_eth_dst_mask; - __be16 vlan_id; - u32 tunnel_id; - enum rocker_of_dpa_table_id goto_tbl; - u32 group_id; - bool copy_to_cpu; - } bridge; - struct { - u32 in_pport; - u32 in_pport_mask; - u8 eth_src[ETH_ALEN]; - u8 eth_src_mask[ETH_ALEN]; - u8 eth_dst[ETH_ALEN]; - u8 eth_dst_mask[ETH_ALEN]; - __be16 eth_type; - __be16 vlan_id; - __be16 vlan_id_mask; - u8 ip_proto; - u8 ip_proto_mask; - u8 ip_tos; - u8 ip_tos_mask; - u32 group_id; - } acl; - }; -}; - -struct rocker_flow_tbl_entry { - struct hlist_node entry; - u32 cmd; - u64 cookie; - struct rocker_flow_tbl_key key; - size_t key_len; - u32 key_crc32; /* key */ -}; - -struct rocker_group_tbl_entry { - struct hlist_node entry; - u32 cmd; - u32 group_id; /* key */ - u16 group_count; - u32 *group_ids; - union { - struct { - u8 pop_vlan; - } l2_interface; - struct { - u8 eth_src[ETH_ALEN]; - u8 eth_dst[ETH_ALEN]; - __be16 vlan_id; - u32 group_id; - } l2_rewrite; - struct { - u8 eth_src[ETH_ALEN]; - u8 eth_dst[ETH_ALEN]; - __be16 vlan_id; - bool ttl_check; - u32 group_id; - } l3_unicast; - }; -}; - -struct rocker_fdb_tbl_entry { - struct hlist_node entry; - u32 key_crc32; /* key */ - bool learned; - unsigned long touched; - struct rocker_fdb_tbl_key { - struct rocker_port *rocker_port; - u8 addr[ETH_ALEN]; - __be16 vlan_id; - } key; -}; - -struct rocker_internal_vlan_tbl_entry { - struct hlist_node entry; - int ifindex; /* key */ - 
u32 ref_count; - __be16 vlan_id; -}; - -struct rocker_neigh_tbl_entry { - struct hlist_node entry; - __be32 ip_addr; /* key */ - struct net_device *dev; - u32 ref_count; - u32 index; - u8 eth_dst[ETH_ALEN]; - bool ttl_check; -}; - -struct rocker_desc_info { - char *data; /* mapped */ - size_t data_size; - size_t tlv_size; - struct rocker_desc *desc; - dma_addr_t mapaddr; -}; - -struct rocker_dma_ring_info { - size_t size; - u32 head; - u32 tail; - struct rocker_desc *desc; /* mapped */ - dma_addr_t mapaddr; - struct rocker_desc_info *desc_info; - unsigned int type; -}; - -struct rocker; - -enum { - ROCKER_CTRL_LINK_LOCAL_MCAST, - ROCKER_CTRL_LOCAL_ARP, - ROCKER_CTRL_IPV4_MCAST, - ROCKER_CTRL_IPV6_MCAST, - ROCKER_CTRL_DFLT_BRIDGING, - ROCKER_CTRL_DFLT_OVS, - ROCKER_CTRL_MAX, -}; - -#define ROCKER_INTERNAL_VLAN_ID_BASE 0x0f00 -#define ROCKER_N_INTERNAL_VLANS 255 -#define ROCKER_VLAN_BITMAP_LEN BITS_TO_LONGS(VLAN_N_VID) -#define ROCKER_INTERNAL_VLAN_BITMAP_LEN BITS_TO_LONGS(ROCKER_N_INTERNAL_VLANS) - -struct rocker_port { - struct net_device *dev; - struct net_device *bridge_dev; - struct rocker *rocker; - unsigned int port_number; - u32 pport; - __be16 internal_vlan_id; - int stp_state; - u32 brport_flags; - unsigned long ageing_time; - bool ctrls[ROCKER_CTRL_MAX]; - unsigned long vlan_bitmap[ROCKER_VLAN_BITMAP_LEN]; - struct napi_struct napi_tx; - struct napi_struct napi_rx; - struct rocker_dma_ring_info tx_ring; - struct rocker_dma_ring_info rx_ring; -}; - -struct rocker { - struct pci_dev *pdev; - u8 __iomem *hw_addr; - struct msix_entry *msix_entries; - unsigned int port_count; - struct rocker_port **ports; - struct { - u64 id; - } hw; - spinlock_t cmd_ring_lock; /* for cmd ring accesses */ - struct rocker_dma_ring_info cmd_ring; - struct rocker_dma_ring_info event_ring; - DECLARE_HASHTABLE(flow_tbl, 16); - spinlock_t flow_tbl_lock; /* for flow tbl accesses */ - u64 flow_tbl_next_cookie; - DECLARE_HASHTABLE(group_tbl, 16); - spinlock_t group_tbl_lock; /* for group tbl accesses */ - struct timer_list fdb_cleanup_timer; - DECLARE_HASHTABLE(fdb_tbl, 16); - spinlock_t fdb_tbl_lock; /* for fdb tbl accesses */ - unsigned long internal_vlan_bitmap[ROCKER_INTERNAL_VLAN_BITMAP_LEN]; - DECLARE_HASHTABLE(internal_vlan_tbl, 8); - spinlock_t internal_vlan_tbl_lock; /* for vlan tbl accesses */ - DECLARE_HASHTABLE(neigh_tbl, 16); - spinlock_t neigh_tbl_lock; /* for neigh tbl accesses */ - u32 neigh_tbl_next_index; -}; - -static const u8 zero_mac[ETH_ALEN] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; -static const u8 ff_mac[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; -static const u8 ll_mac[ETH_ALEN] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 }; -static const u8 ll_mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0 }; -static const u8 mcast_mac[ETH_ALEN] = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 }; -static const u8 ipv4_mcast[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x00 }; -static const u8 ipv4_mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0x80, 0x00, 0x00 }; -static const u8 ipv6_mcast[ETH_ALEN] = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x00 }; -static const u8 ipv6_mask[ETH_ALEN] = { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 }; - -/* Rocker priority levels for flow table entries. Higher - * priority match takes precedence over lower priority match. 
- */ - -enum { - ROCKER_PRIORITY_UNKNOWN = 0, - ROCKER_PRIORITY_IG_PORT = 1, - ROCKER_PRIORITY_VLAN = 1, - ROCKER_PRIORITY_TERM_MAC_UCAST = 0, - ROCKER_PRIORITY_TERM_MAC_MCAST = 1, - ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_EXACT = 1, - ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_WILD = 2, - ROCKER_PRIORITY_BRIDGING_VLAN = 3, - ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_EXACT = 1, - ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_WILD = 2, - ROCKER_PRIORITY_BRIDGING_TENANT = 3, - ROCKER_PRIORITY_ACL_CTRL = 3, - ROCKER_PRIORITY_ACL_NORMAL = 2, - ROCKER_PRIORITY_ACL_DFLT = 1, -}; - -static bool rocker_vlan_id_is_internal(__be16 vlan_id) -{ - u16 start = ROCKER_INTERNAL_VLAN_ID_BASE; - u16 end = 0xffe; - u16 _vlan_id = ntohs(vlan_id); - - return (_vlan_id >= start && _vlan_id <= end); -} - -static __be16 rocker_port_vid_to_vlan(const struct rocker_port *rocker_port, - u16 vid, bool *pop_vlan) -{ - __be16 vlan_id; - - if (pop_vlan) - *pop_vlan = false; - vlan_id = htons(vid); - if (!vlan_id) { - vlan_id = rocker_port->internal_vlan_id; - if (pop_vlan) - *pop_vlan = true; - } - - return vlan_id; -} - -static u16 rocker_port_vlan_to_vid(const struct rocker_port *rocker_port, - __be16 vlan_id) -{ - if (rocker_vlan_id_is_internal(vlan_id)) - return 0; - - return ntohs(vlan_id); -} - -static bool rocker_port_is_bridged(const struct rocker_port *rocker_port) -{ - return rocker_port->bridge_dev && - netif_is_bridge_master(rocker_port->bridge_dev); -} - -static bool rocker_port_is_ovsed(const struct rocker_port *rocker_port) -{ - return rocker_port->bridge_dev && - netif_is_ovs_master(rocker_port->bridge_dev); -} - -#define ROCKER_OP_FLAG_REMOVE BIT(0) -#define ROCKER_OP_FLAG_NOWAIT BIT(1) -#define ROCKER_OP_FLAG_LEARNED BIT(2) -#define ROCKER_OP_FLAG_REFRESH BIT(3) - -static void *__rocker_port_mem_alloc(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags, - size_t size) -{ - struct switchdev_trans_item *elem = NULL; - gfp_t gfp_flags = (flags & ROCKER_OP_FLAG_NOWAIT) ? - GFP_ATOMIC : GFP_KERNEL; - - /* If in transaction prepare phase, allocate the memory - * and enqueue it on a transaction. If in transaction - * commit phase, dequeue the memory from the transaction - * rather than re-allocating the memory. The idea is the - * driver code paths for prepare and commit are identical - * so the memory allocated in the prepare phase is the - * memory used in the commit phase. - */ - - if (!trans) { - elem = kzalloc(size + sizeof(*elem), gfp_flags); - } else if (switchdev_trans_ph_prepare(trans)) { - elem = kzalloc(size + sizeof(*elem), gfp_flags); - if (!elem) - return NULL; - switchdev_trans_item_enqueue(trans, elem, kfree, elem); - } else { - elem = switchdev_trans_item_dequeue(trans); - } - - return elem ? elem + 1 : NULL; -} - -static void *rocker_port_kzalloc(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags, - size_t size) -{ - return __rocker_port_mem_alloc(rocker_port, trans, flags, size); -} - -static void *rocker_port_kcalloc(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags, - size_t n, size_t size) -{ - return __rocker_port_mem_alloc(rocker_port, trans, flags, n * size); -} - -static void rocker_port_kfree(struct switchdev_trans *trans, const void *mem) -{ - struct switchdev_trans_item *elem; - - /* Frees are ignored if in transaction prepare phase. The - * memory remains on the per-port list until freed in the - * commit phase. 
- */ - - if (switchdev_trans_ph_prepare(trans)) - return; - - elem = (struct switchdev_trans_item *) mem - 1; - kfree(elem); -} - -struct rocker_wait { - wait_queue_head_t wait; - bool done; - bool nowait; -}; - -static void rocker_wait_reset(struct rocker_wait *wait) -{ - wait->done = false; - wait->nowait = false; -} - -static void rocker_wait_init(struct rocker_wait *wait) -{ - init_waitqueue_head(&wait->wait); - rocker_wait_reset(wait); -} - -static struct rocker_wait *rocker_wait_create(struct rocker_port *rocker_port, - struct switchdev_trans *trans, - int flags) -{ - struct rocker_wait *wait; - - wait = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*wait)); - if (!wait) - return NULL; - rocker_wait_init(wait); - return wait; -} - -static void rocker_wait_destroy(struct switchdev_trans *trans, - struct rocker_wait *wait) -{ - rocker_port_kfree(trans, wait); -} - -static bool rocker_wait_event_timeout(struct rocker_wait *wait, - unsigned long timeout) -{ - wait_event_timeout(wait->wait, wait->done, HZ / 10); - if (!wait->done) - return false; - return true; -} - -static void rocker_wait_wake_up(struct rocker_wait *wait) -{ - wait->done = true; - wake_up(&wait->wait); -} - -static u32 rocker_msix_vector(const struct rocker *rocker, unsigned int vector) -{ - return rocker->msix_entries[vector].vector; -} - -static u32 rocker_msix_tx_vector(const struct rocker_port *rocker_port) -{ - return rocker_msix_vector(rocker_port->rocker, - ROCKER_MSIX_VEC_TX(rocker_port->port_number)); -} - -static u32 rocker_msix_rx_vector(const struct rocker_port *rocker_port) -{ - return rocker_msix_vector(rocker_port->rocker, - ROCKER_MSIX_VEC_RX(rocker_port->port_number)); -} - -#define rocker_write32(rocker, reg, val) \ - writel((val), (rocker)->hw_addr + (ROCKER_ ## reg)) -#define rocker_read32(rocker, reg) \ - readl((rocker)->hw_addr + (ROCKER_ ## reg)) -#define rocker_write64(rocker, reg, val) \ - writeq((val), (rocker)->hw_addr + (ROCKER_ ## reg)) -#define rocker_read64(rocker, reg) \ - readq((rocker)->hw_addr + (ROCKER_ ## reg)) - -/***************************** - * HW basic testing functions - *****************************/ - -static int rocker_reg_test(const struct rocker *rocker) -{ - const struct pci_dev *pdev = rocker->pdev; - u64 test_reg; - u64 rnd; - - rnd = prandom_u32(); - rnd >>= 1; - rocker_write32(rocker, TEST_REG, rnd); - test_reg = rocker_read32(rocker, TEST_REG); - if (test_reg != rnd * 2) { - dev_err(&pdev->dev, "unexpected 32bit register value %08llx, expected %08llx\n", - test_reg, rnd * 2); - return -EIO; - } - - rnd = prandom_u32(); - rnd <<= 31; - rnd |= prandom_u32(); - rocker_write64(rocker, TEST_REG64, rnd); - test_reg = rocker_read64(rocker, TEST_REG64); - if (test_reg != rnd * 2) { - dev_err(&pdev->dev, "unexpected 64bit register value %16llx, expected %16llx\n", - test_reg, rnd * 2); - return -EIO; - } - - return 0; -} - -static int rocker_dma_test_one(const struct rocker *rocker, - struct rocker_wait *wait, u32 test_type, - dma_addr_t dma_handle, const unsigned char *buf, - const unsigned char *expect, size_t size) -{ - const struct pci_dev *pdev = rocker->pdev; - int i; - - rocker_wait_reset(wait); - rocker_write32(rocker, TEST_DMA_CTRL, test_type); - - if (!rocker_wait_event_timeout(wait, HZ / 10)) { - dev_err(&pdev->dev, "no interrupt received within a timeout\n"); - return -EIO; - } - - for (i = 0; i < size; i++) { - if (buf[i] != expect[i]) { - dev_err(&pdev->dev, "unexpected memory content %02x at byte %x\n, %02x expected", - buf[i], i, expect[i]); - return 
-EIO; - } - } - return 0; -} - -#define ROCKER_TEST_DMA_BUF_SIZE (PAGE_SIZE * 4) -#define ROCKER_TEST_DMA_FILL_PATTERN 0x96 - -static int rocker_dma_test_offset(const struct rocker *rocker, - struct rocker_wait *wait, int offset) -{ - struct pci_dev *pdev = rocker->pdev; - unsigned char *alloc; - unsigned char *buf; - unsigned char *expect; - dma_addr_t dma_handle; - int i; - int err; - - alloc = kzalloc(ROCKER_TEST_DMA_BUF_SIZE * 2 + offset, - GFP_KERNEL | GFP_DMA); - if (!alloc) - return -ENOMEM; - buf = alloc + offset; - expect = buf + ROCKER_TEST_DMA_BUF_SIZE; - - dma_handle = pci_map_single(pdev, buf, ROCKER_TEST_DMA_BUF_SIZE, - PCI_DMA_BIDIRECTIONAL); - if (pci_dma_mapping_error(pdev, dma_handle)) { - err = -EIO; - goto free_alloc; - } - - rocker_write64(rocker, TEST_DMA_ADDR, dma_handle); - rocker_write32(rocker, TEST_DMA_SIZE, ROCKER_TEST_DMA_BUF_SIZE); - - memset(expect, ROCKER_TEST_DMA_FILL_PATTERN, ROCKER_TEST_DMA_BUF_SIZE); - err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_FILL, - dma_handle, buf, expect, - ROCKER_TEST_DMA_BUF_SIZE); - if (err) - goto unmap; - - memset(expect, 0, ROCKER_TEST_DMA_BUF_SIZE); - err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_CLEAR, - dma_handle, buf, expect, - ROCKER_TEST_DMA_BUF_SIZE); - if (err) - goto unmap; - - prandom_bytes(buf, ROCKER_TEST_DMA_BUF_SIZE); - for (i = 0; i < ROCKER_TEST_DMA_BUF_SIZE; i++) - expect[i] = ~buf[i]; - err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_INVERT, - dma_handle, buf, expect, - ROCKER_TEST_DMA_BUF_SIZE); - if (err) - goto unmap; - -unmap: - pci_unmap_single(pdev, dma_handle, ROCKER_TEST_DMA_BUF_SIZE, - PCI_DMA_BIDIRECTIONAL); -free_alloc: - kfree(alloc); - - return err; -} - -static int rocker_dma_test(const struct rocker *rocker, - struct rocker_wait *wait) -{ - int i; - int err; - - for (i = 0; i < 8; i++) { - err = rocker_dma_test_offset(rocker, wait, i); - if (err) - return err; - } - return 0; -} - -static irqreturn_t rocker_test_irq_handler(int irq, void *dev_id) -{ - struct rocker_wait *wait = dev_id; - - rocker_wait_wake_up(wait); - - return IRQ_HANDLED; -} - -static int rocker_basic_hw_test(const struct rocker *rocker) -{ - const struct pci_dev *pdev = rocker->pdev; - struct rocker_wait wait; - int err; - - err = rocker_reg_test(rocker); - if (err) { - dev_err(&pdev->dev, "reg test failed\n"); - return err; - } - - err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_TEST), - rocker_test_irq_handler, 0, - rocker_driver_name, &wait); - if (err) { - dev_err(&pdev->dev, "cannot assign test irq\n"); - return err; - } - - rocker_wait_init(&wait); - rocker_write32(rocker, TEST_IRQ, ROCKER_MSIX_VEC_TEST); - - if (!rocker_wait_event_timeout(&wait, HZ / 10)) { - dev_err(&pdev->dev, "no interrupt received within a timeout\n"); - err = -EIO; - goto free_irq; - } - - err = rocker_dma_test(rocker, &wait); - if (err) - dev_err(&pdev->dev, "dma test failed\n"); - -free_irq: - free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_TEST), &wait); - return err; -} - -/****** - * TLV - ******/ - -#define ROCKER_TLV_ALIGNTO 8U -#define ROCKER_TLV_ALIGN(len) \ - (((len) + ROCKER_TLV_ALIGNTO - 1) & ~(ROCKER_TLV_ALIGNTO - 1)) -#define ROCKER_TLV_HDRLEN ROCKER_TLV_ALIGN(sizeof(struct rocker_tlv)) - -/* <------- ROCKER_TLV_HDRLEN -------> <--- ROCKER_TLV_ALIGN(payload) ---> - * +-----------------------------+- - -+- - - - - - - - - - - - - - -+- - -+ - * | Header | Pad | Payload | Pad | - * | (struct rocker_tlv) | ing | | ing | - * +-----------------------------+- - -+- - - - - - - 
- - - - - - - -+- - -+ - * <--------------------------- tlv->len --------------------------> - */ - -static struct rocker_tlv *rocker_tlv_next(const struct rocker_tlv *tlv, - int *remaining) -{ - int totlen = ROCKER_TLV_ALIGN(tlv->len); - - *remaining -= totlen; - return (struct rocker_tlv *) ((char *) tlv + totlen); -} - -static int rocker_tlv_ok(const struct rocker_tlv *tlv, int remaining) -{ - return remaining >= (int) ROCKER_TLV_HDRLEN && - tlv->len >= ROCKER_TLV_HDRLEN && - tlv->len <= remaining; -} - -#define rocker_tlv_for_each(pos, head, len, rem) \ - for (pos = head, rem = len; \ - rocker_tlv_ok(pos, rem); \ - pos = rocker_tlv_next(pos, &(rem))) - -#define rocker_tlv_for_each_nested(pos, tlv, rem) \ - rocker_tlv_for_each(pos, rocker_tlv_data(tlv), \ - rocker_tlv_len(tlv), rem) - -static int rocker_tlv_attr_size(int payload) -{ - return ROCKER_TLV_HDRLEN + payload; -} - -static int rocker_tlv_total_size(int payload) -{ - return ROCKER_TLV_ALIGN(rocker_tlv_attr_size(payload)); -} - -static int rocker_tlv_padlen(int payload) -{ - return rocker_tlv_total_size(payload) - rocker_tlv_attr_size(payload); -} - -static int rocker_tlv_type(const struct rocker_tlv *tlv) -{ - return tlv->type; -} - -static void *rocker_tlv_data(const struct rocker_tlv *tlv) -{ - return (char *) tlv + ROCKER_TLV_HDRLEN; -} - -static int rocker_tlv_len(const struct rocker_tlv *tlv) -{ - return tlv->len - ROCKER_TLV_HDRLEN; -} - -static u8 rocker_tlv_get_u8(const struct rocker_tlv *tlv) -{ - return *(u8 *) rocker_tlv_data(tlv); -} - -static u16 rocker_tlv_get_u16(const struct rocker_tlv *tlv) -{ - return *(u16 *) rocker_tlv_data(tlv); -} - -static __be16 rocker_tlv_get_be16(const struct rocker_tlv *tlv) -{ - return *(__be16 *) rocker_tlv_data(tlv); -} - -static u32 rocker_tlv_get_u32(const struct rocker_tlv *tlv) -{ - return *(u32 *) rocker_tlv_data(tlv); -} - -static u64 rocker_tlv_get_u64(const struct rocker_tlv *tlv) -{ - return *(u64 *) rocker_tlv_data(tlv); -} - -static void rocker_tlv_parse(const struct rocker_tlv **tb, int maxtype, - const char *buf, int buf_len) -{ - const struct rocker_tlv *tlv; - const struct rocker_tlv *head = (const struct rocker_tlv *) buf; - int rem; - - memset(tb, 0, sizeof(struct rocker_tlv *) * (maxtype + 1)); - - rocker_tlv_for_each(tlv, head, buf_len, rem) { - u32 type = rocker_tlv_type(tlv); - - if (type > 0 && type <= maxtype) - tb[type] = tlv; - } -} - -static void rocker_tlv_parse_nested(const struct rocker_tlv **tb, int maxtype, - const struct rocker_tlv *tlv) -{ - rocker_tlv_parse(tb, maxtype, rocker_tlv_data(tlv), - rocker_tlv_len(tlv)); -} - -static void rocker_tlv_parse_desc(const struct rocker_tlv **tb, int maxtype, - const struct rocker_desc_info *desc_info) -{ - rocker_tlv_parse(tb, maxtype, desc_info->data, - desc_info->desc->tlv_size); -} - -static struct rocker_tlv *rocker_tlv_start(struct rocker_desc_info *desc_info) -{ - return (struct rocker_tlv *) ((char *) desc_info->data + - desc_info->tlv_size); -} - -static int rocker_tlv_put(struct rocker_desc_info *desc_info, - int attrtype, int attrlen, const void *data) -{ - int tail_room = desc_info->data_size - desc_info->tlv_size; - int total_size = rocker_tlv_total_size(attrlen); - struct rocker_tlv *tlv; - - if (unlikely(tail_room < total_size)) - return -EMSGSIZE; - - tlv = rocker_tlv_start(desc_info); - desc_info->tlv_size += total_size; - tlv->type = attrtype; - tlv->len = rocker_tlv_attr_size(attrlen); - memcpy(rocker_tlv_data(tlv), data, attrlen); - memset((char *) tlv + tlv->len, 0, 
rocker_tlv_padlen(attrlen)); - return 0; -} - -static int rocker_tlv_put_u8(struct rocker_desc_info *desc_info, - int attrtype, u8 value) -{ - return rocker_tlv_put(desc_info, attrtype, sizeof(u8), &value); -} - -static int rocker_tlv_put_u16(struct rocker_desc_info *desc_info, - int attrtype, u16 value) -{ - return rocker_tlv_put(desc_info, attrtype, sizeof(u16), &value); -} - -static int rocker_tlv_put_be16(struct rocker_desc_info *desc_info, - int attrtype, __be16 value) -{ - return rocker_tlv_put(desc_info, attrtype, sizeof(__be16), &value); -} - -static int rocker_tlv_put_u32(struct rocker_desc_info *desc_info, - int attrtype, u32 value) -{ - return rocker_tlv_put(desc_info, attrtype, sizeof(u32), &value); -} - -static int rocker_tlv_put_be32(struct rocker_desc_info *desc_info, - int attrtype, __be32 value) -{ - return rocker_tlv_put(desc_info, attrtype, sizeof(__be32), &value); -} - -static int rocker_tlv_put_u64(struct rocker_desc_info *desc_info, - int attrtype, u64 value) -{ - return rocker_tlv_put(desc_info, attrtype, sizeof(u64), &value); -} - -static struct rocker_tlv * -rocker_tlv_nest_start(struct rocker_desc_info *desc_info, int attrtype) -{ - struct rocker_tlv *start = rocker_tlv_start(desc_info); - - if (rocker_tlv_put(desc_info, attrtype, 0, NULL) < 0) - return NULL; - - return start; -} - -static void rocker_tlv_nest_end(struct rocker_desc_info *desc_info, - struct rocker_tlv *start) -{ - start->len = (char *) rocker_tlv_start(desc_info) - (char *) start; -} - -static void rocker_tlv_nest_cancel(struct rocker_desc_info *desc_info, - const struct rocker_tlv *start) -{ - desc_info->tlv_size = (const char *) start - desc_info->data; -} - -/****************************************** - * DMA rings and descriptors manipulations - ******************************************/ - -static u32 __pos_inc(u32 pos, size_t limit) -{ - return ++pos == limit ? 0 : pos; -} - -static int rocker_desc_err(const struct rocker_desc_info *desc_info) -{ - int err = desc_info->desc->comp_err & ~ROCKER_DMA_DESC_COMP_ERR_GEN; - - switch (err) { - case ROCKER_OK: - return 0; - case -ROCKER_ENOENT: - return -ENOENT; - case -ROCKER_ENXIO: - return -ENXIO; - case -ROCKER_ENOMEM: - return -ENOMEM; - case -ROCKER_EEXIST: - return -EEXIST; - case -ROCKER_EINVAL: - return -EINVAL; - case -ROCKER_EMSGSIZE: - return -EMSGSIZE; - case -ROCKER_ENOTSUP: - return -EOPNOTSUPP; - case -ROCKER_ENOBUFS: - return -ENOBUFS; - } - - return -EINVAL; -} - -static void rocker_desc_gen_clear(const struct rocker_desc_info *desc_info) -{ - desc_info->desc->comp_err &= ~ROCKER_DMA_DESC_COMP_ERR_GEN; -} - -static bool rocker_desc_gen(const struct rocker_desc_info *desc_info) -{ - u32 comp_err = desc_info->desc->comp_err; - - return comp_err & ROCKER_DMA_DESC_COMP_ERR_GEN ? 
true : false; -} - -static void *rocker_desc_cookie_ptr_get(const struct rocker_desc_info *desc_info) -{ - return (void *)(uintptr_t)desc_info->desc->cookie; -} - -static void rocker_desc_cookie_ptr_set(const struct rocker_desc_info *desc_info, - void *ptr) -{ - desc_info->desc->cookie = (uintptr_t) ptr; -} - -static struct rocker_desc_info * -rocker_desc_head_get(const struct rocker_dma_ring_info *info) -{ - static struct rocker_desc_info *desc_info; - u32 head = __pos_inc(info->head, info->size); - - desc_info = &info->desc_info[info->head]; - if (head == info->tail) - return NULL; /* ring full */ - desc_info->tlv_size = 0; - return desc_info; -} - -static void rocker_desc_commit(const struct rocker_desc_info *desc_info) -{ - desc_info->desc->buf_size = desc_info->data_size; - desc_info->desc->tlv_size = desc_info->tlv_size; -} - -static void rocker_desc_head_set(const struct rocker *rocker, - struct rocker_dma_ring_info *info, - const struct rocker_desc_info *desc_info) -{ - u32 head = __pos_inc(info->head, info->size); - - BUG_ON(head == info->tail); - rocker_desc_commit(desc_info); - info->head = head; - rocker_write32(rocker, DMA_DESC_HEAD(info->type), head); -} - -static struct rocker_desc_info * -rocker_desc_tail_get(struct rocker_dma_ring_info *info) -{ - static struct rocker_desc_info *desc_info; - - if (info->tail == info->head) - return NULL; /* nothing to be done between head and tail */ - desc_info = &info->desc_info[info->tail]; - if (!rocker_desc_gen(desc_info)) - return NULL; /* gen bit not set, desc is not ready yet */ - info->tail = __pos_inc(info->tail, info->size); - desc_info->tlv_size = desc_info->desc->tlv_size; - return desc_info; -} - -static void rocker_dma_ring_credits_set(const struct rocker *rocker, - const struct rocker_dma_ring_info *info, - u32 credits) -{ - if (credits) - rocker_write32(rocker, DMA_DESC_CREDITS(info->type), credits); -} - -static unsigned long rocker_dma_ring_size_fix(size_t size) -{ - return max(ROCKER_DMA_SIZE_MIN, - min(roundup_pow_of_two(size), ROCKER_DMA_SIZE_MAX)); -} - -static int rocker_dma_ring_create(const struct rocker *rocker, - unsigned int type, - size_t size, - struct rocker_dma_ring_info *info) -{ - int i; - - BUG_ON(size != rocker_dma_ring_size_fix(size)); - info->size = size; - info->type = type; - info->head = 0; - info->tail = 0; - info->desc_info = kcalloc(info->size, sizeof(*info->desc_info), - GFP_KERNEL); - if (!info->desc_info) - return -ENOMEM; - - info->desc = pci_alloc_consistent(rocker->pdev, - info->size * sizeof(*info->desc), - &info->mapaddr); - if (!info->desc) { - kfree(info->desc_info); - return -ENOMEM; - } - - for (i = 0; i < info->size; i++) - info->desc_info[i].desc = &info->desc[i]; - - rocker_write32(rocker, DMA_DESC_CTRL(info->type), - ROCKER_DMA_DESC_CTRL_RESET); - rocker_write64(rocker, DMA_DESC_ADDR(info->type), info->mapaddr); - rocker_write32(rocker, DMA_DESC_SIZE(info->type), info->size); - - return 0; -} - -static void rocker_dma_ring_destroy(const struct rocker *rocker, - const struct rocker_dma_ring_info *info) -{ - rocker_write64(rocker, DMA_DESC_ADDR(info->type), 0); - - pci_free_consistent(rocker->pdev, - info->size * sizeof(struct rocker_desc), - info->desc, info->mapaddr); - kfree(info->desc_info); -} - -static void rocker_dma_ring_pass_to_producer(const struct rocker *rocker, - struct rocker_dma_ring_info *info) -{ - int i; - - BUG_ON(info->head || info->tail); - - /* When ring is consumer, we need to advance head for each desc. 
- * That tells hw that the desc is ready to be used by it. - */ - for (i = 0; i < info->size - 1; i++) - rocker_desc_head_set(rocker, info, &info->desc_info[i]); - rocker_desc_commit(&info->desc_info[i]); -} - -static int rocker_dma_ring_bufs_alloc(const struct rocker *rocker, - const struct rocker_dma_ring_info *info, - int direction, size_t buf_size) -{ - struct pci_dev *pdev = rocker->pdev; - int i; - int err; - - for (i = 0; i < info->size; i++) { - struct rocker_desc_info *desc_info = &info->desc_info[i]; - struct rocker_desc *desc = &info->desc[i]; - dma_addr_t dma_handle; - char *buf; - - buf = kzalloc(buf_size, GFP_KERNEL | GFP_DMA); - if (!buf) { - err = -ENOMEM; - goto rollback; - } - - dma_handle = pci_map_single(pdev, buf, buf_size, direction); - if (pci_dma_mapping_error(pdev, dma_handle)) { - kfree(buf); - err = -EIO; - goto rollback; - } - - desc_info->data = buf; - desc_info->data_size = buf_size; - dma_unmap_addr_set(desc_info, mapaddr, dma_handle); - - desc->buf_addr = dma_handle; - desc->buf_size = buf_size; - } - return 0; - -rollback: - for (i--; i >= 0; i--) { - const struct rocker_desc_info *desc_info = &info->desc_info[i]; - - pci_unmap_single(pdev, dma_unmap_addr(desc_info, mapaddr), - desc_info->data_size, direction); - kfree(desc_info->data); - } - return err; -} - -static void rocker_dma_ring_bufs_free(const struct rocker *rocker, - const struct rocker_dma_ring_info *info, - int direction) -{ - struct pci_dev *pdev = rocker->pdev; - int i; - - for (i = 0; i < info->size; i++) { - const struct rocker_desc_info *desc_info = &info->desc_info[i]; - struct rocker_desc *desc = &info->desc[i]; - - desc->buf_addr = 0; - desc->buf_size = 0; - pci_unmap_single(pdev, dma_unmap_addr(desc_info, mapaddr), - desc_info->data_size, direction); - kfree(desc_info->data); - } -} - -static int rocker_dma_rings_init(struct rocker *rocker) -{ - const struct pci_dev *pdev = rocker->pdev; - int err; - - err = rocker_dma_ring_create(rocker, ROCKER_DMA_CMD, - ROCKER_DMA_CMD_DEFAULT_SIZE, - &rocker->cmd_ring); - if (err) { - dev_err(&pdev->dev, "failed to create command dma ring\n"); - return err; - } - - spin_lock_init(&rocker->cmd_ring_lock); - - err = rocker_dma_ring_bufs_alloc(rocker, &rocker->cmd_ring, - PCI_DMA_BIDIRECTIONAL, PAGE_SIZE); - if (err) { - dev_err(&pdev->dev, "failed to alloc command dma ring buffers\n"); - goto err_dma_cmd_ring_bufs_alloc; - } - - err = rocker_dma_ring_create(rocker, ROCKER_DMA_EVENT, - ROCKER_DMA_EVENT_DEFAULT_SIZE, - &rocker->event_ring); - if (err) { - dev_err(&pdev->dev, "failed to create event dma ring\n"); - goto err_dma_event_ring_create; - } - - err = rocker_dma_ring_bufs_alloc(rocker, &rocker->event_ring, - PCI_DMA_FROMDEVICE, PAGE_SIZE); - if (err) { - dev_err(&pdev->dev, "failed to alloc event dma ring buffers\n"); - goto err_dma_event_ring_bufs_alloc; - } - rocker_dma_ring_pass_to_producer(rocker, &rocker->event_ring); - return 0; - -err_dma_event_ring_bufs_alloc: - rocker_dma_ring_destroy(rocker, &rocker->event_ring); -err_dma_event_ring_create: - rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring, - PCI_DMA_BIDIRECTIONAL); -err_dma_cmd_ring_bufs_alloc: - rocker_dma_ring_destroy(rocker, &rocker->cmd_ring); - return err; -} - -static void rocker_dma_rings_fini(struct rocker *rocker) -{ - rocker_dma_ring_bufs_free(rocker, &rocker->event_ring, - PCI_DMA_BIDIRECTIONAL); - rocker_dma_ring_destroy(rocker, &rocker->event_ring); - rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring, - PCI_DMA_BIDIRECTIONAL); - rocker_dma_ring_destroy(rocker, 
&rocker->cmd_ring); -} - -static int rocker_dma_rx_ring_skb_map(const struct rocker_port *rocker_port, - struct rocker_desc_info *desc_info, - struct sk_buff *skb, size_t buf_len) -{ - const struct rocker *rocker = rocker_port->rocker; - struct pci_dev *pdev = rocker->pdev; - dma_addr_t dma_handle; - - dma_handle = pci_map_single(pdev, skb->data, buf_len, - PCI_DMA_FROMDEVICE); - if (pci_dma_mapping_error(pdev, dma_handle)) - return -EIO; - if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_RX_FRAG_ADDR, dma_handle)) - goto tlv_put_failure; - if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_RX_FRAG_MAX_LEN, buf_len)) - goto tlv_put_failure; - return 0; - -tlv_put_failure: - pci_unmap_single(pdev, dma_handle, buf_len, PCI_DMA_FROMDEVICE); - desc_info->tlv_size = 0; - return -EMSGSIZE; -} - -static size_t rocker_port_rx_buf_len(const struct rocker_port *rocker_port) -{ - return rocker_port->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; -} - -static int rocker_dma_rx_ring_skb_alloc(const struct rocker_port *rocker_port, - struct rocker_desc_info *desc_info) -{ - struct net_device *dev = rocker_port->dev; - struct sk_buff *skb; - size_t buf_len = rocker_port_rx_buf_len(rocker_port); - int err; - - /* Ensure that hw will see tlv_size zero in case of an error. - * That tells hw to use another descriptor. - */ - rocker_desc_cookie_ptr_set(desc_info, NULL); - desc_info->tlv_size = 0; - - skb = netdev_alloc_skb_ip_align(dev, buf_len); - if (!skb) - return -ENOMEM; - err = rocker_dma_rx_ring_skb_map(rocker_port, desc_info, skb, buf_len); - if (err) { - dev_kfree_skb_any(skb); - return err; - } - rocker_desc_cookie_ptr_set(desc_info, skb); - return 0; -} - -static void rocker_dma_rx_ring_skb_unmap(const struct rocker *rocker, - const struct rocker_tlv **attrs) -{ - struct pci_dev *pdev = rocker->pdev; - dma_addr_t dma_handle; - size_t len; - - if (!attrs[ROCKER_TLV_RX_FRAG_ADDR] || - !attrs[ROCKER_TLV_RX_FRAG_MAX_LEN]) - return; - dma_handle = rocker_tlv_get_u64(attrs[ROCKER_TLV_RX_FRAG_ADDR]); - len = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FRAG_MAX_LEN]); - pci_unmap_single(pdev, dma_handle, len, PCI_DMA_FROMDEVICE); -} - -static void rocker_dma_rx_ring_skb_free(const struct rocker *rocker, - const struct rocker_desc_info *desc_info) -{ - const struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1]; - struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info); - - if (!skb) - return; - rocker_tlv_parse_desc(attrs, ROCKER_TLV_RX_MAX, desc_info); - rocker_dma_rx_ring_skb_unmap(rocker, attrs); - dev_kfree_skb_any(skb); -} - -static int rocker_dma_rx_ring_skbs_alloc(const struct rocker_port *rocker_port) -{ - const struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring; - const struct rocker *rocker = rocker_port->rocker; - int i; - int err; - - for (i = 0; i < rx_ring->size; i++) { - err = rocker_dma_rx_ring_skb_alloc(rocker_port, - &rx_ring->desc_info[i]); - if (err) - goto rollback; - } - return 0; - -rollback: - for (i--; i >= 0; i--) - rocker_dma_rx_ring_skb_free(rocker, &rx_ring->desc_info[i]); - return err; -} - -static void rocker_dma_rx_ring_skbs_free(const struct rocker_port *rocker_port) -{ - const struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring; - const struct rocker *rocker = rocker_port->rocker; - int i; - - for (i = 0; i < rx_ring->size; i++) - rocker_dma_rx_ring_skb_free(rocker, &rx_ring->desc_info[i]); -} - -static int rocker_port_dma_rings_init(struct rocker_port *rocker_port) -{ - struct rocker *rocker = rocker_port->rocker; - int err; - - err = rocker_dma_ring_create(rocker, - 
ROCKER_DMA_TX(rocker_port->port_number), - ROCKER_DMA_TX_DEFAULT_SIZE, - &rocker_port->tx_ring); - if (err) { - netdev_err(rocker_port->dev, "failed to create tx dma ring\n"); - return err; - } - - err = rocker_dma_ring_bufs_alloc(rocker, &rocker_port->tx_ring, - PCI_DMA_TODEVICE, - ROCKER_DMA_TX_DESC_SIZE); - if (err) { - netdev_err(rocker_port->dev, "failed to alloc tx dma ring buffers\n"); - goto err_dma_tx_ring_bufs_alloc; - } - - err = rocker_dma_ring_create(rocker, - ROCKER_DMA_RX(rocker_port->port_number), - ROCKER_DMA_RX_DEFAULT_SIZE, - &rocker_port->rx_ring); - if (err) { - netdev_err(rocker_port->dev, "failed to create rx dma ring\n"); - goto err_dma_rx_ring_create; - } - - err = rocker_dma_ring_bufs_alloc(rocker, &rocker_port->rx_ring, - PCI_DMA_BIDIRECTIONAL, - ROCKER_DMA_RX_DESC_SIZE); - if (err) { - netdev_err(rocker_port->dev, "failed to alloc rx dma ring buffers\n"); - goto err_dma_rx_ring_bufs_alloc; - } - - err = rocker_dma_rx_ring_skbs_alloc(rocker_port); - if (err) { - netdev_err(rocker_port->dev, "failed to alloc rx dma ring skbs\n"); - goto err_dma_rx_ring_skbs_alloc; - } - rocker_dma_ring_pass_to_producer(rocker, &rocker_port->rx_ring); - - return 0; - -err_dma_rx_ring_skbs_alloc: - rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring, - PCI_DMA_BIDIRECTIONAL); -err_dma_rx_ring_bufs_alloc: - rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring); -err_dma_rx_ring_create: - rocker_dma_ring_bufs_free(rocker, &rocker_port->tx_ring, - PCI_DMA_TODEVICE); -err_dma_tx_ring_bufs_alloc: - rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring); - return err; -} - -static void rocker_port_dma_rings_fini(struct rocker_port *rocker_port) -{ - struct rocker *rocker = rocker_port->rocker; - - rocker_dma_rx_ring_skbs_free(rocker_port); - rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring, - PCI_DMA_BIDIRECTIONAL); - rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring); - rocker_dma_ring_bufs_free(rocker, &rocker_port->tx_ring, - PCI_DMA_TODEVICE); - rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring); -} - -static void rocker_port_set_enable(const struct rocker_port *rocker_port, - bool enable) -{ - u64 val = rocker_read64(rocker_port->rocker, PORT_PHYS_ENABLE); - - if (enable) - val |= 1ULL << rocker_port->pport; - else - val &= ~(1ULL << rocker_port->pport); - rocker_write64(rocker_port->rocker, PORT_PHYS_ENABLE, val); -} - -/******************************** - * Interrupt handler and helpers - ********************************/ - -static irqreturn_t rocker_cmd_irq_handler(int irq, void *dev_id) -{ - struct rocker *rocker = dev_id; - const struct rocker_desc_info *desc_info; - struct rocker_wait *wait; - u32 credits = 0; - - spin_lock(&rocker->cmd_ring_lock); - while ((desc_info = rocker_desc_tail_get(&rocker->cmd_ring))) { - wait = rocker_desc_cookie_ptr_get(desc_info); - if (wait->nowait) { - rocker_desc_gen_clear(desc_info); - rocker_wait_destroy(NULL, wait); - } else { - rocker_wait_wake_up(wait); - } - credits++; - } - spin_unlock(&rocker->cmd_ring_lock); - rocker_dma_ring_credits_set(rocker, &rocker->cmd_ring, credits); - - return IRQ_HANDLED; -} - -static void rocker_port_link_up(const struct rocker_port *rocker_port) -{ - netif_carrier_on(rocker_port->dev); - netdev_info(rocker_port->dev, "Link is up\n"); -} - -static void rocker_port_link_down(const struct rocker_port *rocker_port) -{ - netif_carrier_off(rocker_port->dev); - netdev_info(rocker_port->dev, "Link is down\n"); -} - -static int rocker_event_link_change(const struct rocker *rocker, - const struct 
rocker_tlv *info) -{ - const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_LINK_CHANGED_MAX + 1]; - unsigned int port_number; - bool link_up; - struct rocker_port *rocker_port; - - rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_LINK_CHANGED_MAX, info); - if (!attrs[ROCKER_TLV_EVENT_LINK_CHANGED_PPORT] || - !attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP]) - return -EIO; - port_number = - rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_PPORT]) - 1; - link_up = rocker_tlv_get_u8(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP]); - - if (port_number >= rocker->port_count) - return -EINVAL; - - rocker_port = rocker->ports[port_number]; - if (netif_carrier_ok(rocker_port->dev) != link_up) { - if (link_up) - rocker_port_link_up(rocker_port); - else - rocker_port_link_down(rocker_port); - } - - return 0; -} - -static int rocker_port_fdb(struct rocker_port *rocker_port, - struct switchdev_trans *trans, - const unsigned char *addr, - __be16 vlan_id, int flags); - -static int rocker_event_mac_vlan_seen(const struct rocker *rocker, - const struct rocker_tlv *info) -{ - const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAX + 1]; - unsigned int port_number; - struct rocker_port *rocker_port; - const unsigned char *addr; - int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_LEARNED; - __be16 vlan_id; - - rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_MAC_VLAN_MAX, info); - if (!attrs[ROCKER_TLV_EVENT_MAC_VLAN_PPORT] || - !attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC] || - !attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID]) - return -EIO; - port_number = - rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_MAC_VLAN_PPORT]) - 1; - addr = rocker_tlv_data(attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC]); - vlan_id = rocker_tlv_get_be16(attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID]); - - if (port_number >= rocker->port_count) - return -EINVAL; - - rocker_port = rocker->ports[port_number]; - - if (rocker_port->stp_state != BR_STATE_LEARNING && - rocker_port->stp_state != BR_STATE_FORWARDING) - return 0; - - return rocker_port_fdb(rocker_port, NULL, addr, vlan_id, flags); -} - -static int rocker_event_process(const struct rocker *rocker, - const struct rocker_desc_info *desc_info) -{ - const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAX + 1]; - const struct rocker_tlv *info; - u16 type; - - rocker_tlv_parse_desc(attrs, ROCKER_TLV_EVENT_MAX, desc_info); - if (!attrs[ROCKER_TLV_EVENT_TYPE] || - !attrs[ROCKER_TLV_EVENT_INFO]) - return -EIO; - - type = rocker_tlv_get_u16(attrs[ROCKER_TLV_EVENT_TYPE]); - info = attrs[ROCKER_TLV_EVENT_INFO]; - - switch (type) { - case ROCKER_TLV_EVENT_TYPE_LINK_CHANGED: - return rocker_event_link_change(rocker, info); - case ROCKER_TLV_EVENT_TYPE_MAC_VLAN_SEEN: - return rocker_event_mac_vlan_seen(rocker, info); - } - - return -EOPNOTSUPP; -} - -static irqreturn_t rocker_event_irq_handler(int irq, void *dev_id) -{ - struct rocker *rocker = dev_id; - const struct pci_dev *pdev = rocker->pdev; - const struct rocker_desc_info *desc_info; - u32 credits = 0; - int err; - - while ((desc_info = rocker_desc_tail_get(&rocker->event_ring))) { - err = rocker_desc_err(desc_info); - if (err) { - dev_err(&pdev->dev, "event desc received with err %d\n", - err); - } else { - err = rocker_event_process(rocker, desc_info); - if (err) - dev_err(&pdev->dev, "event processing failed with err %d\n", - err); - } - rocker_desc_gen_clear(desc_info); - rocker_desc_head_set(rocker, &rocker->event_ring, desc_info); - credits++; - } - rocker_dma_ring_credits_set(rocker, &rocker->event_ring, credits); - - return IRQ_HANDLED; -} - -static irqreturn_t 
rocker_tx_irq_handler(int irq, void *dev_id) -{ - struct rocker_port *rocker_port = dev_id; - - napi_schedule(&rocker_port->napi_tx); - return IRQ_HANDLED; -} - -static irqreturn_t rocker_rx_irq_handler(int irq, void *dev_id) -{ - struct rocker_port *rocker_port = dev_id; - - napi_schedule(&rocker_port->napi_rx); - return IRQ_HANDLED; -} - -/******************** - * Command interface - ********************/ - -typedef int (*rocker_cmd_prep_cb_t)(const struct rocker_port *rocker_port, - struct rocker_desc_info *desc_info, - void *priv); - -typedef int (*rocker_cmd_proc_cb_t)(const struct rocker_port *rocker_port, - const struct rocker_desc_info *desc_info, - void *priv); - -static int rocker_cmd_exec(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags, - rocker_cmd_prep_cb_t prepare, void *prepare_priv, - rocker_cmd_proc_cb_t process, void *process_priv) -{ - struct rocker *rocker = rocker_port->rocker; - struct rocker_desc_info *desc_info; - struct rocker_wait *wait; - bool nowait = !!(flags & ROCKER_OP_FLAG_NOWAIT); - unsigned long lock_flags; - int err; - - wait = rocker_wait_create(rocker_port, trans, flags); - if (!wait) - return -ENOMEM; - wait->nowait = nowait; - - spin_lock_irqsave(&rocker->cmd_ring_lock, lock_flags); - - desc_info = rocker_desc_head_get(&rocker->cmd_ring); - if (!desc_info) { - spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags); - err = -EAGAIN; - goto out; - } - - err = prepare(rocker_port, desc_info, prepare_priv); - if (err) { - spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags); - goto out; - } - - rocker_desc_cookie_ptr_set(desc_info, wait); - - if (!switchdev_trans_ph_prepare(trans)) - rocker_desc_head_set(rocker, &rocker->cmd_ring, desc_info); - - spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags); - - if (nowait) - return 0; - - if (!switchdev_trans_ph_prepare(trans)) - if (!rocker_wait_event_timeout(wait, HZ / 10)) - return -EIO; - - err = rocker_desc_err(desc_info); - if (err) - return err; - - if (process) - err = process(rocker_port, desc_info, process_priv); - - rocker_desc_gen_clear(desc_info); -out: - rocker_wait_destroy(trans, wait); - return err; -} - -static int -rocker_cmd_get_port_settings_prep(const struct rocker_port *rocker_port, - struct rocker_desc_info *desc_info, - void *priv) -{ - struct rocker_tlv *cmd_info; - - if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, - ROCKER_TLV_CMD_TYPE_GET_PORT_SETTINGS)) - return -EMSGSIZE; - cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); - if (!cmd_info) - return -EMSGSIZE; - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT, - rocker_port->pport)) - return -EMSGSIZE; - rocker_tlv_nest_end(desc_info, cmd_info); - return 0; -} - -static int -rocker_cmd_get_port_settings_ethtool_proc(const struct rocker_port *rocker_port, - const struct rocker_desc_info *desc_info, - void *priv) -{ - struct ethtool_cmd *ecmd = priv; - const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1]; - const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1]; - u32 speed; - u8 duplex; - u8 autoneg; - - rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info); - if (!attrs[ROCKER_TLV_CMD_INFO]) - return -EIO; - - rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX, - attrs[ROCKER_TLV_CMD_INFO]); - if (!info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED] || - !info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX] || - !info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG]) - return -EIO; - - speed = 
rocker_tlv_get_u32(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED]); - duplex = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX]); - autoneg = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG]); - - ecmd->transceiver = XCVR_INTERNAL; - ecmd->supported = SUPPORTED_TP; - ecmd->phy_address = 0xff; - ecmd->port = PORT_TP; - ethtool_cmd_speed_set(ecmd, speed); - ecmd->duplex = duplex ? DUPLEX_FULL : DUPLEX_HALF; - ecmd->autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE; - - return 0; -} - -static int -rocker_cmd_get_port_settings_macaddr_proc(const struct rocker_port *rocker_port, - const struct rocker_desc_info *desc_info, - void *priv) -{ - unsigned char *macaddr = priv; - const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1]; - const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1]; - const struct rocker_tlv *attr; - - rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info); - if (!attrs[ROCKER_TLV_CMD_INFO]) - return -EIO; - - rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX, - attrs[ROCKER_TLV_CMD_INFO]); - attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR]; - if (!attr) - return -EIO; - - if (rocker_tlv_len(attr) != ETH_ALEN) - return -EINVAL; - - ether_addr_copy(macaddr, rocker_tlv_data(attr)); - return 0; -} - -struct port_name { - char *buf; - size_t len; -}; - -static int -rocker_cmd_get_port_settings_phys_name_proc(const struct rocker_port *rocker_port, - const struct rocker_desc_info *desc_info, - void *priv) -{ - const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1]; - const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1]; - struct port_name *name = priv; - const struct rocker_tlv *attr; - size_t i, j, len; - const char *str; - - rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info); - if (!attrs[ROCKER_TLV_CMD_INFO]) - return -EIO; - - rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX, - attrs[ROCKER_TLV_CMD_INFO]); - attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_PHYS_NAME]; - if (!attr) - return -EIO; - - len = min_t(size_t, rocker_tlv_len(attr), name->len); - str = rocker_tlv_data(attr); - - /* make sure name only contains alphanumeric characters */ - for (i = j = 0; i < len; ++i) { - if (isalnum(str[i])) { - name->buf[j] = str[i]; - j++; - } - } - - if (j == 0) - return -EIO; - - name->buf[j] = '\0'; - - return 0; -} - -static int -rocker_cmd_set_port_settings_ethtool_prep(const struct rocker_port *rocker_port, - struct rocker_desc_info *desc_info, - void *priv) -{ - struct ethtool_cmd *ecmd = priv; - struct rocker_tlv *cmd_info; - - if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, - ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS)) - return -EMSGSIZE; - cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); - if (!cmd_info) - return -EMSGSIZE; - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT, - rocker_port->pport)) - return -EMSGSIZE; - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_SPEED, - ethtool_cmd_speed(ecmd))) - return -EMSGSIZE; - if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX, - ecmd->duplex)) - return -EMSGSIZE; - if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG, - ecmd->autoneg)) - return -EMSGSIZE; - rocker_tlv_nest_end(desc_info, cmd_info); - return 0; -} - -static int -rocker_cmd_set_port_settings_macaddr_prep(const struct rocker_port *rocker_port, - struct rocker_desc_info *desc_info, - void *priv) -{ - const unsigned char *macaddr = priv; - 
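All of the *_prep callbacks in this region build command messages the same way: flat type/length/value attributes are appended to the descriptor buffer, and nesting is done by reserving an outer TLV header (rocker_tlv_nest_start) and patching its length once the inner attributes are written (rocker_tlv_nest_end). A compilable user-space sketch of that encode pattern, using assumed toy names rather than the rocker_tlv API:

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct tlv_buf {
	uint8_t data[256];
	size_t used;
};

struct tlv_hdr {
	uint16_t type;
	uint16_t len;	/* payload length, header excluded */
};

static int tlv_put(struct tlv_buf *b, uint16_t type, const void *val,
		   uint16_t len)
{
	if (b->used + sizeof(struct tlv_hdr) + len > sizeof(b->data))
		return -1;	/* the driver returns -EMSGSIZE here */
	struct tlv_hdr h = { type, len };
	memcpy(b->data + b->used, &h, sizeof(h));
	memcpy(b->data + b->used + sizeof(h), val, len);
	b->used += sizeof(h) + len;
	return 0;
}

/* Reserve a header; return its offset so it can be patched later.
 * (Bounds check omitted for brevity.) */
static size_t tlv_nest_start(struct tlv_buf *b, uint16_t type)
{
	struct tlv_hdr h = { type, 0 };
	size_t off = b->used;
	memcpy(b->data + b->used, &h, sizeof(h));
	b->used += sizeof(h);
	return off;
}

static void tlv_nest_end(struct tlv_buf *b, size_t off)
{
	uint16_t len = b->used - off - sizeof(struct tlv_hdr);
	/* patch hdr.len; offset 2 holds, two u16s have no padding */
	memcpy(b->data + off + 2, &len, sizeof(len));
}

int main(void)
{
	struct tlv_buf b = { .used = 0 };
	uint32_t pport = 1;
	size_t nest = tlv_nest_start(&b, 2 /* stand-in for CMD_INFO */);
	tlv_put(&b, 3 /* stand-in for PPORT */, &pport, sizeof(pport));
	tlv_nest_end(&b, nest);
	printf("encoded %zu bytes\n", b.used);
	return 0;
}
```

The matching *_proc callbacks walk the same layout in reverse, parsing attributes into an index-by-type array before reading the fields they need.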
struct rocker_tlv *cmd_info; - - if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, - ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS)) - return -EMSGSIZE; - cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); - if (!cmd_info) - return -EMSGSIZE; - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT, - rocker_port->pport)) - return -EMSGSIZE; - if (rocker_tlv_put(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR, - ETH_ALEN, macaddr)) - return -EMSGSIZE; - rocker_tlv_nest_end(desc_info, cmd_info); - return 0; -} - -static int -rocker_cmd_set_port_settings_mtu_prep(const struct rocker_port *rocker_port, - struct rocker_desc_info *desc_info, - void *priv) -{ - int mtu = *(int *)priv; - struct rocker_tlv *cmd_info; - - if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, - ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS)) - return -EMSGSIZE; - cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); - if (!cmd_info) - return -EMSGSIZE; - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT, - rocker_port->pport)) - return -EMSGSIZE; - if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_MTU, - mtu)) - return -EMSGSIZE; - rocker_tlv_nest_end(desc_info, cmd_info); - return 0; -} - -static int -rocker_cmd_set_port_learning_prep(const struct rocker_port *rocker_port, - struct rocker_desc_info *desc_info, - void *priv) -{ - struct rocker_tlv *cmd_info; - - if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, - ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS)) - return -EMSGSIZE; - cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); - if (!cmd_info) - return -EMSGSIZE; - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT, - rocker_port->pport)) - return -EMSGSIZE; - if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_LEARNING, - !!(rocker_port->brport_flags & BR_LEARNING))) - return -EMSGSIZE; - rocker_tlv_nest_end(desc_info, cmd_info); - return 0; -} - -static int rocker_cmd_get_port_settings_ethtool(struct rocker_port *rocker_port, - struct ethtool_cmd *ecmd) -{ - return rocker_cmd_exec(rocker_port, NULL, 0, - rocker_cmd_get_port_settings_prep, NULL, - rocker_cmd_get_port_settings_ethtool_proc, - ecmd); -} - -static int rocker_cmd_get_port_settings_macaddr(struct rocker_port *rocker_port, - unsigned char *macaddr) -{ - return rocker_cmd_exec(rocker_port, NULL, 0, - rocker_cmd_get_port_settings_prep, NULL, - rocker_cmd_get_port_settings_macaddr_proc, - macaddr); -} - -static int rocker_cmd_set_port_settings_ethtool(struct rocker_port *rocker_port, - struct ethtool_cmd *ecmd) -{ - return rocker_cmd_exec(rocker_port, NULL, 0, - rocker_cmd_set_port_settings_ethtool_prep, - ecmd, NULL, NULL); -} - -static int rocker_cmd_set_port_settings_macaddr(struct rocker_port *rocker_port, - unsigned char *macaddr) -{ - return rocker_cmd_exec(rocker_port, NULL, 0, - rocker_cmd_set_port_settings_macaddr_prep, - macaddr, NULL, NULL); -} - -static int rocker_cmd_set_port_settings_mtu(struct rocker_port *rocker_port, - int mtu) -{ - return rocker_cmd_exec(rocker_port, NULL, 0, - rocker_cmd_set_port_settings_mtu_prep, - &mtu, NULL, NULL); -} - -static int rocker_port_set_learning(struct rocker_port *rocker_port, - struct switchdev_trans *trans) -{ - return rocker_cmd_exec(rocker_port, trans, 0, - rocker_cmd_set_port_learning_prep, - NULL, NULL, NULL); -} - -static int -rocker_cmd_flow_tbl_add_ig_port(struct rocker_desc_info *desc_info, - const struct rocker_flow_tbl_entry *entry) -{ - if (rocker_tlv_put_u32(desc_info, 
ROCKER_TLV_OF_DPA_IN_PPORT, - entry->key.ig_port.in_pport)) - return -EMSGSIZE; - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK, - entry->key.ig_port.in_pport_mask)) - return -EMSGSIZE; - if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID, - entry->key.ig_port.goto_tbl)) - return -EMSGSIZE; - - return 0; -} - -static int -rocker_cmd_flow_tbl_add_vlan(struct rocker_desc_info *desc_info, - const struct rocker_flow_tbl_entry *entry) -{ - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT, - entry->key.vlan.in_pport)) - return -EMSGSIZE; - if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID, - entry->key.vlan.vlan_id)) - return -EMSGSIZE; - if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK, - entry->key.vlan.vlan_id_mask)) - return -EMSGSIZE; - if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID, - entry->key.vlan.goto_tbl)) - return -EMSGSIZE; - if (entry->key.vlan.untagged && - rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_NEW_VLAN_ID, - entry->key.vlan.new_vlan_id)) - return -EMSGSIZE; - - return 0; -} - -static int -rocker_cmd_flow_tbl_add_term_mac(struct rocker_desc_info *desc_info, - const struct rocker_flow_tbl_entry *entry) -{ - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT, - entry->key.term_mac.in_pport)) - return -EMSGSIZE; - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK, - entry->key.term_mac.in_pport_mask)) - return -EMSGSIZE; - if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE, - entry->key.term_mac.eth_type)) - return -EMSGSIZE; - if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC, - ETH_ALEN, entry->key.term_mac.eth_dst)) - return -EMSGSIZE; - if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK, - ETH_ALEN, entry->key.term_mac.eth_dst_mask)) - return -EMSGSIZE; - if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID, - entry->key.term_mac.vlan_id)) - return -EMSGSIZE; - if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK, - entry->key.term_mac.vlan_id_mask)) - return -EMSGSIZE; - if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID, - entry->key.term_mac.goto_tbl)) - return -EMSGSIZE; - if (entry->key.term_mac.copy_to_cpu && - rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION, - entry->key.term_mac.copy_to_cpu)) - return -EMSGSIZE; - - return 0; -} - -static int -rocker_cmd_flow_tbl_add_ucast_routing(struct rocker_desc_info *desc_info, - const struct rocker_flow_tbl_entry *entry) -{ - if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE, - entry->key.ucast_routing.eth_type)) - return -EMSGSIZE; - if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP, - entry->key.ucast_routing.dst4)) - return -EMSGSIZE; - if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP_MASK, - entry->key.ucast_routing.dst4_mask)) - return -EMSGSIZE; - if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID, - entry->key.ucast_routing.goto_tbl)) - return -EMSGSIZE; - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID, - entry->key.ucast_routing.group_id)) - return -EMSGSIZE; - - return 0; -} - -static int -rocker_cmd_flow_tbl_add_bridge(struct rocker_desc_info *desc_info, - const struct rocker_flow_tbl_entry *entry) -{ - if (entry->key.bridge.has_eth_dst && - rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC, - ETH_ALEN, entry->key.bridge.eth_dst)) - return -EMSGSIZE; - if (entry->key.bridge.has_eth_dst_mask && - rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK, 
- ETH_ALEN, entry->key.bridge.eth_dst_mask)) - return -EMSGSIZE; - if (entry->key.bridge.vlan_id && - rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID, - entry->key.bridge.vlan_id)) - return -EMSGSIZE; - if (entry->key.bridge.tunnel_id && - rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_TUNNEL_ID, - entry->key.bridge.tunnel_id)) - return -EMSGSIZE; - if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID, - entry->key.bridge.goto_tbl)) - return -EMSGSIZE; - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID, - entry->key.bridge.group_id)) - return -EMSGSIZE; - if (entry->key.bridge.copy_to_cpu && - rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION, - entry->key.bridge.copy_to_cpu)) - return -EMSGSIZE; - - return 0; -} - -static int -rocker_cmd_flow_tbl_add_acl(struct rocker_desc_info *desc_info, - const struct rocker_flow_tbl_entry *entry) -{ - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT, - entry->key.acl.in_pport)) - return -EMSGSIZE; - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK, - entry->key.acl.in_pport_mask)) - return -EMSGSIZE; - if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC, - ETH_ALEN, entry->key.acl.eth_src)) - return -EMSGSIZE; - if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC_MASK, - ETH_ALEN, entry->key.acl.eth_src_mask)) - return -EMSGSIZE; - if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC, - ETH_ALEN, entry->key.acl.eth_dst)) - return -EMSGSIZE; - if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK, - ETH_ALEN, entry->key.acl.eth_dst_mask)) - return -EMSGSIZE; - if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE, - entry->key.acl.eth_type)) - return -EMSGSIZE; - if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID, - entry->key.acl.vlan_id)) - return -EMSGSIZE; - if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK, - entry->key.acl.vlan_id_mask)) - return -EMSGSIZE; - - switch (ntohs(entry->key.acl.eth_type)) { - case ETH_P_IP: - case ETH_P_IPV6: - if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_PROTO, - entry->key.acl.ip_proto)) - return -EMSGSIZE; - if (rocker_tlv_put_u8(desc_info, - ROCKER_TLV_OF_DPA_IP_PROTO_MASK, - entry->key.acl.ip_proto_mask)) - return -EMSGSIZE; - if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_DSCP, - entry->key.acl.ip_tos & 0x3f)) - return -EMSGSIZE; - if (rocker_tlv_put_u8(desc_info, - ROCKER_TLV_OF_DPA_IP_DSCP_MASK, - entry->key.acl.ip_tos_mask & 0x3f)) - return -EMSGSIZE; - if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_ECN, - (entry->key.acl.ip_tos & 0xc0) >> 6)) - return -EMSGSIZE; - if (rocker_tlv_put_u8(desc_info, - ROCKER_TLV_OF_DPA_IP_ECN_MASK, - (entry->key.acl.ip_tos_mask & 0xc0) >> 6)) - return -EMSGSIZE; - break; - } - - if (entry->key.acl.group_id != ROCKER_GROUP_NONE && - rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID, - entry->key.acl.group_id)) - return -EMSGSIZE; - - return 0; -} - -static int rocker_cmd_flow_tbl_add(const struct rocker_port *rocker_port, - struct rocker_desc_info *desc_info, - void *priv) -{ - const struct rocker_flow_tbl_entry *entry = priv; - struct rocker_tlv *cmd_info; - int err = 0; - - if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd)) - return -EMSGSIZE; - cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); - if (!cmd_info) - return -EMSGSIZE; - if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_TABLE_ID, - entry->key.tbl_id)) - return -EMSGSIZE; - if (rocker_tlv_put_u32(desc_info, 
ROCKER_TLV_OF_DPA_PRIORITY, - entry->key.priority)) - return -EMSGSIZE; - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_HARDTIME, 0)) - return -EMSGSIZE; - if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE, - entry->cookie)) - return -EMSGSIZE; - - switch (entry->key.tbl_id) { - case ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT: - err = rocker_cmd_flow_tbl_add_ig_port(desc_info, entry); - break; - case ROCKER_OF_DPA_TABLE_ID_VLAN: - err = rocker_cmd_flow_tbl_add_vlan(desc_info, entry); - break; - case ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC: - err = rocker_cmd_flow_tbl_add_term_mac(desc_info, entry); - break; - case ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING: - err = rocker_cmd_flow_tbl_add_ucast_routing(desc_info, entry); - break; - case ROCKER_OF_DPA_TABLE_ID_BRIDGING: - err = rocker_cmd_flow_tbl_add_bridge(desc_info, entry); - break; - case ROCKER_OF_DPA_TABLE_ID_ACL_POLICY: - err = rocker_cmd_flow_tbl_add_acl(desc_info, entry); - break; - default: - err = -ENOTSUPP; - break; - } - - if (err) - return err; - - rocker_tlv_nest_end(desc_info, cmd_info); - - return 0; -} - -static int rocker_cmd_flow_tbl_del(const struct rocker_port *rocker_port, - struct rocker_desc_info *desc_info, - void *priv) -{ - const struct rocker_flow_tbl_entry *entry = priv; - struct rocker_tlv *cmd_info; - - if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd)) - return -EMSGSIZE; - cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); - if (!cmd_info) - return -EMSGSIZE; - if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE, - entry->cookie)) - return -EMSGSIZE; - rocker_tlv_nest_end(desc_info, cmd_info); - - return 0; -} - -static int -rocker_cmd_group_tbl_add_l2_interface(struct rocker_desc_info *desc_info, - struct rocker_group_tbl_entry *entry) -{ - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_OUT_PPORT, - ROCKER_GROUP_PORT_GET(entry->group_id))) - return -EMSGSIZE; - if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_POP_VLAN, - entry->l2_interface.pop_vlan)) - return -EMSGSIZE; - - return 0; -} - -static int -rocker_cmd_group_tbl_add_l2_rewrite(struct rocker_desc_info *desc_info, - const struct rocker_group_tbl_entry *entry) -{ - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER, - entry->l2_rewrite.group_id)) - return -EMSGSIZE; - if (!is_zero_ether_addr(entry->l2_rewrite.eth_src) && - rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC, - ETH_ALEN, entry->l2_rewrite.eth_src)) - return -EMSGSIZE; - if (!is_zero_ether_addr(entry->l2_rewrite.eth_dst) && - rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC, - ETH_ALEN, entry->l2_rewrite.eth_dst)) - return -EMSGSIZE; - if (entry->l2_rewrite.vlan_id && - rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID, - entry->l2_rewrite.vlan_id)) - return -EMSGSIZE; - - return 0; -} - -static int -rocker_cmd_group_tbl_add_group_ids(struct rocker_desc_info *desc_info, - const struct rocker_group_tbl_entry *entry) -{ - int i; - struct rocker_tlv *group_ids; - - if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GROUP_COUNT, - entry->group_count)) - return -EMSGSIZE; - - group_ids = rocker_tlv_nest_start(desc_info, - ROCKER_TLV_OF_DPA_GROUP_IDS); - if (!group_ids) - return -EMSGSIZE; - - for (i = 0; i < entry->group_count; i++) - /* Note TLV array is 1-based */ - if (rocker_tlv_put_u32(desc_info, i + 1, entry->group_ids[i])) - return -EMSGSIZE; - - rocker_tlv_nest_end(desc_info, group_ids); - - return 0; -} - -static int -rocker_cmd_group_tbl_add_l3_unicast(struct rocker_desc_info *desc_info, - const struct 
rocker_group_tbl_entry *entry) -{ - if (!is_zero_ether_addr(entry->l3_unicast.eth_src) && - rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC, - ETH_ALEN, entry->l3_unicast.eth_src)) - return -EMSGSIZE; - if (!is_zero_ether_addr(entry->l3_unicast.eth_dst) && - rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC, - ETH_ALEN, entry->l3_unicast.eth_dst)) - return -EMSGSIZE; - if (entry->l3_unicast.vlan_id && - rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID, - entry->l3_unicast.vlan_id)) - return -EMSGSIZE; - if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_TTL_CHECK, - entry->l3_unicast.ttl_check)) - return -EMSGSIZE; - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER, - entry->l3_unicast.group_id)) - return -EMSGSIZE; - - return 0; -} - -static int rocker_cmd_group_tbl_add(const struct rocker_port *rocker_port, - struct rocker_desc_info *desc_info, - void *priv) -{ - struct rocker_group_tbl_entry *entry = priv; - struct rocker_tlv *cmd_info; - int err = 0; - - if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd)) - return -EMSGSIZE; - cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); - if (!cmd_info) - return -EMSGSIZE; - - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID, - entry->group_id)) - return -EMSGSIZE; - - switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) { - case ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE: - err = rocker_cmd_group_tbl_add_l2_interface(desc_info, entry); - break; - case ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE: - err = rocker_cmd_group_tbl_add_l2_rewrite(desc_info, entry); - break; - case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD: - case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST: - err = rocker_cmd_group_tbl_add_group_ids(desc_info, entry); - break; - case ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST: - err = rocker_cmd_group_tbl_add_l3_unicast(desc_info, entry); - break; - default: - err = -ENOTSUPP; - break; - } - - if (err) - return err; - - rocker_tlv_nest_end(desc_info, cmd_info); - - return 0; -} - -static int rocker_cmd_group_tbl_del(const struct rocker_port *rocker_port, - struct rocker_desc_info *desc_info, - void *priv) -{ - const struct rocker_group_tbl_entry *entry = priv; - struct rocker_tlv *cmd_info; - - if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd)) - return -EMSGSIZE; - cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); - if (!cmd_info) - return -EMSGSIZE; - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID, - entry->group_id)) - return -EMSGSIZE; - rocker_tlv_nest_end(desc_info, cmd_info); - - return 0; -} - -/*************************************************** - * Flow, group, FDB, internal VLAN and neigh tables - ***************************************************/ - -static int rocker_init_tbls(struct rocker *rocker) -{ - hash_init(rocker->flow_tbl); - spin_lock_init(&rocker->flow_tbl_lock); - - hash_init(rocker->group_tbl); - spin_lock_init(&rocker->group_tbl_lock); - - hash_init(rocker->fdb_tbl); - spin_lock_init(&rocker->fdb_tbl_lock); - - hash_init(rocker->internal_vlan_tbl); - spin_lock_init(&rocker->internal_vlan_tbl_lock); - - hash_init(rocker->neigh_tbl); - spin_lock_init(&rocker->neigh_tbl_lock); - - return 0; -} - -static void rocker_free_tbls(struct rocker *rocker) -{ - unsigned long flags; - struct rocker_flow_tbl_entry *flow_entry; - struct rocker_group_tbl_entry *group_entry; - struct rocker_fdb_tbl_entry *fdb_entry; - struct rocker_internal_vlan_tbl_entry *internal_vlan_entry; - struct rocker_neigh_tbl_entry *neigh_entry; - struct hlist_node *tmp; - 
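The rocker_free_tbls() teardown here drains five hash tables (flow, group, FDB, internal VLAN, neighbor) with hash_for_each_safe(), which caches the next node before the current one is unlinked so deletion during iteration is harmless, and it holds the matching spinlock so no writer races the teardown. A user-space model of the same safe-drain idea over a toy bucket array (illustrative names, not the kernel hashtable API):

```c
#include <stdio.h>
#include <stdlib.h>

struct node {
	int key;
	struct node *next;
};

#define NBUCKETS 4
static struct node *tbl[NBUCKETS];

static void tbl_add(int key)
{
	struct node *n = malloc(sizeof(*n));
	n->key = key;
	n->next = tbl[key % NBUCKETS];
	tbl[key % NBUCKETS] = n;
}

/* Drain every bucket; cache the next pointer before freeing the
 * current node, which is what the _safe iterator buys you. */
static void tbl_free_all(void)
{
	for (int bkt = 0; bkt < NBUCKETS; bkt++) {
		struct node *n = tbl[bkt], *tmp;

		while (n) {
			tmp = n->next;
			free(n);
			n = tmp;
		}
		tbl[bkt] = NULL;
	}
}

int main(void)
{
	for (int i = 0; i < 10; i++)
		tbl_add(i);
	tbl_free_all();
	puts("all tables drained");
	return 0;
}
```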
int bkt; - - spin_lock_irqsave(&rocker->flow_tbl_lock, flags); - hash_for_each_safe(rocker->flow_tbl, bkt, tmp, flow_entry, entry) - hash_del(&flow_entry->entry); - spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags); - - spin_lock_irqsave(&rocker->group_tbl_lock, flags); - hash_for_each_safe(rocker->group_tbl, bkt, tmp, group_entry, entry) - hash_del(&group_entry->entry); - spin_unlock_irqrestore(&rocker->group_tbl_lock, flags); - - spin_lock_irqsave(&rocker->fdb_tbl_lock, flags); - hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, fdb_entry, entry) - hash_del(&fdb_entry->entry); - spin_unlock_irqrestore(&rocker->fdb_tbl_lock, flags); - - spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, flags); - hash_for_each_safe(rocker->internal_vlan_tbl, bkt, - tmp, internal_vlan_entry, entry) - hash_del(&internal_vlan_entry->entry); - spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, flags); - - spin_lock_irqsave(&rocker->neigh_tbl_lock, flags); - hash_for_each_safe(rocker->neigh_tbl, bkt, tmp, neigh_entry, entry) - hash_del(&neigh_entry->entry); - spin_unlock_irqrestore(&rocker->neigh_tbl_lock, flags); -} - -static struct rocker_flow_tbl_entry * -rocker_flow_tbl_find(const struct rocker *rocker, - const struct rocker_flow_tbl_entry *match) -{ - struct rocker_flow_tbl_entry *found; - size_t key_len = match->key_len ? match->key_len : sizeof(found->key); - - hash_for_each_possible(rocker->flow_tbl, found, - entry, match->key_crc32) { - if (memcmp(&found->key, &match->key, key_len) == 0) - return found; - } - - return NULL; -} - -static int rocker_flow_tbl_add(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags, - struct rocker_flow_tbl_entry *match) -{ - struct rocker *rocker = rocker_port->rocker; - struct rocker_flow_tbl_entry *found; - size_t key_len = match->key_len ? match->key_len : sizeof(found->key); - unsigned long lock_flags; - - match->key_crc32 = crc32(~0, &match->key, key_len); - - spin_lock_irqsave(&rocker->flow_tbl_lock, lock_flags); - - found = rocker_flow_tbl_find(rocker, match); - - if (found) { - match->cookie = found->cookie; - if (!switchdev_trans_ph_prepare(trans)) - hash_del(&found->entry); - rocker_port_kfree(trans, found); - found = match; - found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD; - } else { - found = match; - found->cookie = rocker->flow_tbl_next_cookie++; - found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD; - } - - if (!switchdev_trans_ph_prepare(trans)) - hash_add(rocker->flow_tbl, &found->entry, found->key_crc32); - - spin_unlock_irqrestore(&rocker->flow_tbl_lock, lock_flags); - - return rocker_cmd_exec(rocker_port, trans, flags, - rocker_cmd_flow_tbl_add, found, NULL, NULL); -} - -static int rocker_flow_tbl_del(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags, - struct rocker_flow_tbl_entry *match) -{ - struct rocker *rocker = rocker_port->rocker; - struct rocker_flow_tbl_entry *found; - size_t key_len = match->key_len ? 
match->key_len : sizeof(found->key); - unsigned long lock_flags; - int err = 0; - - match->key_crc32 = crc32(~0, &match->key, key_len); - - spin_lock_irqsave(&rocker->flow_tbl_lock, lock_flags); - - found = rocker_flow_tbl_find(rocker, match); - - if (found) { - if (!switchdev_trans_ph_prepare(trans)) - hash_del(&found->entry); - found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL; - } - - spin_unlock_irqrestore(&rocker->flow_tbl_lock, lock_flags); - - rocker_port_kfree(trans, match); - - if (found) { - err = rocker_cmd_exec(rocker_port, trans, flags, - rocker_cmd_flow_tbl_del, - found, NULL, NULL); - rocker_port_kfree(trans, found); - } - - return err; -} - -static int rocker_flow_tbl_do(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags, - struct rocker_flow_tbl_entry *entry) -{ - if (flags & ROCKER_OP_FLAG_REMOVE) - return rocker_flow_tbl_del(rocker_port, trans, flags, entry); - else - return rocker_flow_tbl_add(rocker_port, trans, flags, entry); -} - -static int rocker_flow_tbl_ig_port(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags, - u32 in_pport, u32 in_pport_mask, - enum rocker_of_dpa_table_id goto_tbl) -{ - struct rocker_flow_tbl_entry *entry; - - entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry)); - if (!entry) - return -ENOMEM; - - entry->key.priority = ROCKER_PRIORITY_IG_PORT; - entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT; - entry->key.ig_port.in_pport = in_pport; - entry->key.ig_port.in_pport_mask = in_pport_mask; - entry->key.ig_port.goto_tbl = goto_tbl; - - return rocker_flow_tbl_do(rocker_port, trans, flags, entry); -} - -static int rocker_flow_tbl_vlan(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags, - u32 in_pport, __be16 vlan_id, - __be16 vlan_id_mask, - enum rocker_of_dpa_table_id goto_tbl, - bool untagged, __be16 new_vlan_id) -{ - struct rocker_flow_tbl_entry *entry; - - entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry)); - if (!entry) - return -ENOMEM; - - entry->key.priority = ROCKER_PRIORITY_VLAN; - entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_VLAN; - entry->key.vlan.in_pport = in_pport; - entry->key.vlan.vlan_id = vlan_id; - entry->key.vlan.vlan_id_mask = vlan_id_mask; - entry->key.vlan.goto_tbl = goto_tbl; - - entry->key.vlan.untagged = untagged; - entry->key.vlan.new_vlan_id = new_vlan_id; - - return rocker_flow_tbl_do(rocker_port, trans, flags, entry); -} - -static int rocker_flow_tbl_term_mac(struct rocker_port *rocker_port, - struct switchdev_trans *trans, - u32 in_pport, u32 in_pport_mask, - __be16 eth_type, const u8 *eth_dst, - const u8 *eth_dst_mask, __be16 vlan_id, - __be16 vlan_id_mask, bool copy_to_cpu, - int flags) -{ - struct rocker_flow_tbl_entry *entry; - - entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry)); - if (!entry) - return -ENOMEM; - - if (is_multicast_ether_addr(eth_dst)) { - entry->key.priority = ROCKER_PRIORITY_TERM_MAC_MCAST; - entry->key.term_mac.goto_tbl = - ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING; - } else { - entry->key.priority = ROCKER_PRIORITY_TERM_MAC_UCAST; - entry->key.term_mac.goto_tbl = - ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING; - } - - entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC; - entry->key.term_mac.in_pport = in_pport; - entry->key.term_mac.in_pport_mask = in_pport_mask; - entry->key.term_mac.eth_type = eth_type; - ether_addr_copy(entry->key.term_mac.eth_dst, eth_dst); - ether_addr_copy(entry->key.term_mac.eth_dst_mask, eth_dst_mask); - 
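rocker_flow_tbl_add() above keys entries by a CRC32 of the (possibly truncated) match key and turns a repeated add of the same key into a modify by deleting the old entry and reusing its cookie. A user-space sketch of that add-or-modify decision, assuming toy names and a toy hash in place of crc32(); note the real code also memcmp()s the full key in rocker_flow_tbl_find() rather than trusting the hash alone:

```c
#include <stdint.h>
#include <stdio.h>

enum cmd { FLOW_ADD, FLOW_MOD };

struct flow {
	uint32_t key_crc;
	uint64_t cookie;
	int in_use;
};

static struct flow table[16];
static uint64_t next_cookie = 1;	/* flow_tbl_next_cookie */

/* Toy FNV-1a hash standing in for crc32(~0, key, key_len). */
static uint32_t key_hash(const void *key, size_t len)
{
	const uint8_t *p = key;
	uint32_t h = 2166136261u;

	while (len--)
		h = (h ^ *p++) * 16777619u;
	return h;
}

static enum cmd flow_add(const void *key, size_t len, uint64_t *cookie)
{
	uint32_t crc = key_hash(key, len);

	for (int i = 0; i < 16; i++)
		if (table[i].in_use && table[i].key_crc == crc) {
			*cookie = table[i].cookie;	/* reuse: modify */
			return FLOW_MOD;
		}
	for (int i = 0; i < 16; i++)
		if (!table[i].in_use) {
			table[i] = (struct flow){ crc, next_cookie++, 1 };
			*cookie = table[i].cookie;
			return FLOW_ADD;
		}
	*cookie = 0;	/* toy table is full; chained buckets can't be */
	return FLOW_ADD;
}

int main(void)
{
	uint64_t c;

	printf("first insert: %s (cookie %llu)\n",
	       flow_add("key", 3, &c) == FLOW_ADD ? "ADD" : "MOD",
	       (unsigned long long)c);
	printf("second insert: %s (cookie %llu)\n",
	       flow_add("key", 3, &c) == FLOW_ADD ? "ADD" : "MOD",
	       (unsigned long long)c);
	return 0;
}
```

Keeping the cookie stable across modifies is what lets the hardware treat the second command as an update of the same flow rather than a new one.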
entry->key.term_mac.vlan_id = vlan_id; - entry->key.term_mac.vlan_id_mask = vlan_id_mask; - entry->key.term_mac.copy_to_cpu = copy_to_cpu; - - return rocker_flow_tbl_do(rocker_port, trans, flags, entry); -} - -static int rocker_flow_tbl_bridge(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags, - const u8 *eth_dst, const u8 *eth_dst_mask, - __be16 vlan_id, u32 tunnel_id, - enum rocker_of_dpa_table_id goto_tbl, - u32 group_id, bool copy_to_cpu) -{ - struct rocker_flow_tbl_entry *entry; - u32 priority; - bool vlan_bridging = !!vlan_id; - bool dflt = !eth_dst || (eth_dst && eth_dst_mask); - bool wild = false; - - entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry)); - if (!entry) - return -ENOMEM; - - entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_BRIDGING; - - if (eth_dst) { - entry->key.bridge.has_eth_dst = 1; - ether_addr_copy(entry->key.bridge.eth_dst, eth_dst); - } - if (eth_dst_mask) { - entry->key.bridge.has_eth_dst_mask = 1; - ether_addr_copy(entry->key.bridge.eth_dst_mask, eth_dst_mask); - if (!ether_addr_equal(eth_dst_mask, ff_mac)) - wild = true; - } - - priority = ROCKER_PRIORITY_UNKNOWN; - if (vlan_bridging && dflt && wild) - priority = ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_WILD; - else if (vlan_bridging && dflt && !wild) - priority = ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_EXACT; - else if (vlan_bridging && !dflt) - priority = ROCKER_PRIORITY_BRIDGING_VLAN; - else if (!vlan_bridging && dflt && wild) - priority = ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_WILD; - else if (!vlan_bridging && dflt && !wild) - priority = ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_EXACT; - else if (!vlan_bridging && !dflt) - priority = ROCKER_PRIORITY_BRIDGING_TENANT; - - entry->key.priority = priority; - entry->key.bridge.vlan_id = vlan_id; - entry->key.bridge.tunnel_id = tunnel_id; - entry->key.bridge.goto_tbl = goto_tbl; - entry->key.bridge.group_id = group_id; - entry->key.bridge.copy_to_cpu = copy_to_cpu; - - return rocker_flow_tbl_do(rocker_port, trans, flags, entry); -} - -static int rocker_flow_tbl_ucast4_routing(struct rocker_port *rocker_port, - struct switchdev_trans *trans, - __be16 eth_type, __be32 dst, - __be32 dst_mask, u32 priority, - enum rocker_of_dpa_table_id goto_tbl, - u32 group_id, int flags) -{ - struct rocker_flow_tbl_entry *entry; - - entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry)); - if (!entry) - return -ENOMEM; - - entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING; - entry->key.priority = priority; - entry->key.ucast_routing.eth_type = eth_type; - entry->key.ucast_routing.dst4 = dst; - entry->key.ucast_routing.dst4_mask = dst_mask; - entry->key.ucast_routing.goto_tbl = goto_tbl; - entry->key.ucast_routing.group_id = group_id; - entry->key_len = offsetof(struct rocker_flow_tbl_key, - ucast_routing.group_id); - - return rocker_flow_tbl_do(rocker_port, trans, flags, entry); -} - -static int rocker_flow_tbl_acl(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags, - u32 in_pport, u32 in_pport_mask, - const u8 *eth_src, const u8 *eth_src_mask, - const u8 *eth_dst, const u8 *eth_dst_mask, - __be16 eth_type, __be16 vlan_id, - __be16 vlan_id_mask, u8 ip_proto, - u8 ip_proto_mask, u8 ip_tos, u8 ip_tos_mask, - u32 group_id) -{ - u32 priority; - struct rocker_flow_tbl_entry *entry; - - entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry)); - if (!entry) - return -ENOMEM; - - priority = ROCKER_PRIORITY_ACL_NORMAL; - if (eth_dst && eth_dst_mask) { - if (ether_addr_equal(eth_dst_mask, mcast_mac)) - 
priority = ROCKER_PRIORITY_ACL_DFLT; - else if (is_link_local_ether_addr(eth_dst)) - priority = ROCKER_PRIORITY_ACL_CTRL; - } - - entry->key.priority = priority; - entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_ACL_POLICY; - entry->key.acl.in_pport = in_pport; - entry->key.acl.in_pport_mask = in_pport_mask; - - if (eth_src) - ether_addr_copy(entry->key.acl.eth_src, eth_src); - if (eth_src_mask) - ether_addr_copy(entry->key.acl.eth_src_mask, eth_src_mask); - if (eth_dst) - ether_addr_copy(entry->key.acl.eth_dst, eth_dst); - if (eth_dst_mask) - ether_addr_copy(entry->key.acl.eth_dst_mask, eth_dst_mask); - - entry->key.acl.eth_type = eth_type; - entry->key.acl.vlan_id = vlan_id; - entry->key.acl.vlan_id_mask = vlan_id_mask; - entry->key.acl.ip_proto = ip_proto; - entry->key.acl.ip_proto_mask = ip_proto_mask; - entry->key.acl.ip_tos = ip_tos; - entry->key.acl.ip_tos_mask = ip_tos_mask; - entry->key.acl.group_id = group_id; - - return rocker_flow_tbl_do(rocker_port, trans, flags, entry); -} - -static struct rocker_group_tbl_entry * -rocker_group_tbl_find(const struct rocker *rocker, - const struct rocker_group_tbl_entry *match) -{ - struct rocker_group_tbl_entry *found; - - hash_for_each_possible(rocker->group_tbl, found, - entry, match->group_id) { - if (found->group_id == match->group_id) - return found; - } - - return NULL; -} - -static void rocker_group_tbl_entry_free(struct switchdev_trans *trans, - struct rocker_group_tbl_entry *entry) -{ - switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) { - case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD: - case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST: - rocker_port_kfree(trans, entry->group_ids); - break; - default: - break; - } - rocker_port_kfree(trans, entry); -} - -static int rocker_group_tbl_add(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags, - struct rocker_group_tbl_entry *match) -{ - struct rocker *rocker = rocker_port->rocker; - struct rocker_group_tbl_entry *found; - unsigned long lock_flags; - - spin_lock_irqsave(&rocker->group_tbl_lock, lock_flags); - - found = rocker_group_tbl_find(rocker, match); - - if (found) { - if (!switchdev_trans_ph_prepare(trans)) - hash_del(&found->entry); - rocker_group_tbl_entry_free(trans, found); - found = match; - found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD; - } else { - found = match; - found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD; - } - - if (!switchdev_trans_ph_prepare(trans)) - hash_add(rocker->group_tbl, &found->entry, found->group_id); - - spin_unlock_irqrestore(&rocker->group_tbl_lock, lock_flags); - - return rocker_cmd_exec(rocker_port, trans, flags, - rocker_cmd_group_tbl_add, found, NULL, NULL); -} - -static int rocker_group_tbl_del(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags, - struct rocker_group_tbl_entry *match) -{ - struct rocker *rocker = rocker_port->rocker; - struct rocker_group_tbl_entry *found; - unsigned long lock_flags; - int err = 0; - - spin_lock_irqsave(&rocker->group_tbl_lock, lock_flags); - - found = rocker_group_tbl_find(rocker, match); - - if (found) { - if (!switchdev_trans_ph_prepare(trans)) - hash_del(&found->entry); - found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL; - } - - spin_unlock_irqrestore(&rocker->group_tbl_lock, lock_flags); - - rocker_group_tbl_entry_free(trans, match); - - if (found) { - err = rocker_cmd_exec(rocker_port, trans, flags, - rocker_cmd_group_tbl_del, - found, NULL, NULL); - rocker_group_tbl_entry_free(trans, found); - } - - return err; -} - -static int rocker_group_tbl_do(struct rocker_port 
*rocker_port, - struct switchdev_trans *trans, int flags, - struct rocker_group_tbl_entry *entry) -{ - if (flags & ROCKER_OP_FLAG_REMOVE) - return rocker_group_tbl_del(rocker_port, trans, flags, entry); - else - return rocker_group_tbl_add(rocker_port, trans, flags, entry); -} - -static int rocker_group_l2_interface(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags, - __be16 vlan_id, u32 out_pport, - int pop_vlan) -{ - struct rocker_group_tbl_entry *entry; - - entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry)); - if (!entry) - return -ENOMEM; - - entry->group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport); - entry->l2_interface.pop_vlan = pop_vlan; - - return rocker_group_tbl_do(rocker_port, trans, flags, entry); -} - -static int rocker_group_l2_fan_out(struct rocker_port *rocker_port, - struct switchdev_trans *trans, - int flags, u8 group_count, - const u32 *group_ids, u32 group_id) -{ - struct rocker_group_tbl_entry *entry; - - entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry)); - if (!entry) - return -ENOMEM; - - entry->group_id = group_id; - entry->group_count = group_count; - - entry->group_ids = rocker_port_kcalloc(rocker_port, trans, flags, - group_count, sizeof(u32)); - if (!entry->group_ids) { - rocker_port_kfree(trans, entry); - return -ENOMEM; - } - memcpy(entry->group_ids, group_ids, group_count * sizeof(u32)); - - return rocker_group_tbl_do(rocker_port, trans, flags, entry); -} - -static int rocker_group_l2_flood(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags, - __be16 vlan_id, u8 group_count, - const u32 *group_ids, u32 group_id) -{ - return rocker_group_l2_fan_out(rocker_port, trans, flags, - group_count, group_ids, - group_id); -} - -static int rocker_group_l3_unicast(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags, - u32 index, const u8 *src_mac, const u8 *dst_mac, - __be16 vlan_id, bool ttl_check, u32 pport) -{ - struct rocker_group_tbl_entry *entry; - - entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry)); - if (!entry) - return -ENOMEM; - - entry->group_id = ROCKER_GROUP_L3_UNICAST(index); - if (src_mac) - ether_addr_copy(entry->l3_unicast.eth_src, src_mac); - if (dst_mac) - ether_addr_copy(entry->l3_unicast.eth_dst, dst_mac); - entry->l3_unicast.vlan_id = vlan_id; - entry->l3_unicast.ttl_check = ttl_check; - entry->l3_unicast.group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, pport); - - return rocker_group_tbl_do(rocker_port, trans, flags, entry); -} - -static struct rocker_neigh_tbl_entry * -rocker_neigh_tbl_find(const struct rocker *rocker, __be32 ip_addr) -{ - struct rocker_neigh_tbl_entry *found; - - hash_for_each_possible(rocker->neigh_tbl, found, - entry, be32_to_cpu(ip_addr)) - if (found->ip_addr == ip_addr) - return found; - - return NULL; -} - -static void _rocker_neigh_add(struct rocker *rocker, - struct switchdev_trans *trans, - struct rocker_neigh_tbl_entry *entry) -{ - if (!switchdev_trans_ph_commit(trans)) - entry->index = rocker->neigh_tbl_next_index++; - if (switchdev_trans_ph_prepare(trans)) - return; - entry->ref_count++; - hash_add(rocker->neigh_tbl, &entry->entry, - be32_to_cpu(entry->ip_addr)); -} - -static void _rocker_neigh_del(struct switchdev_trans *trans, - struct rocker_neigh_tbl_entry *entry) -{ - if (switchdev_trans_ph_prepare(trans)) - return; - if (--entry->ref_count == 0) { - hash_del(&entry->entry); - rocker_port_kfree(trans, entry); - } -} - -static void _rocker_neigh_update(struct 
rocker_neigh_tbl_entry *entry, - struct switchdev_trans *trans, - const u8 *eth_dst, bool ttl_check) -{ - if (eth_dst) { - ether_addr_copy(entry->eth_dst, eth_dst); - entry->ttl_check = ttl_check; - } else if (!switchdev_trans_ph_prepare(trans)) { - entry->ref_count++; - } -} - -static int rocker_port_ipv4_neigh(struct rocker_port *rocker_port, - struct switchdev_trans *trans, - int flags, __be32 ip_addr, const u8 *eth_dst) -{ - struct rocker *rocker = rocker_port->rocker; - struct rocker_neigh_tbl_entry *entry; - struct rocker_neigh_tbl_entry *found; - unsigned long lock_flags; - __be16 eth_type = htons(ETH_P_IP); - enum rocker_of_dpa_table_id goto_tbl = - ROCKER_OF_DPA_TABLE_ID_ACL_POLICY; - u32 group_id; - u32 priority = 0; - bool adding = !(flags & ROCKER_OP_FLAG_REMOVE); - bool updating; - bool removing; - int err = 0; - - entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry)); - if (!entry) - return -ENOMEM; - - spin_lock_irqsave(&rocker->neigh_tbl_lock, lock_flags); - - found = rocker_neigh_tbl_find(rocker, ip_addr); - - updating = found && adding; - removing = found && !adding; - adding = !found && adding; - - if (adding) { - entry->ip_addr = ip_addr; - entry->dev = rocker_port->dev; - ether_addr_copy(entry->eth_dst, eth_dst); - entry->ttl_check = true; - _rocker_neigh_add(rocker, trans, entry); - } else if (removing) { - memcpy(entry, found, sizeof(*entry)); - _rocker_neigh_del(trans, found); - } else if (updating) { - _rocker_neigh_update(found, trans, eth_dst, true); - memcpy(entry, found, sizeof(*entry)); - } else { - err = -ENOENT; - } - - spin_unlock_irqrestore(&rocker->neigh_tbl_lock, lock_flags); - - if (err) - goto err_out; - - /* For each active neighbor, we have an L3 unicast group and - * a /32 route to the neighbor, which uses the L3 unicast - * group. The L3 unicast group can also be referred to by - * other routes' nexthops. - */ - - err = rocker_group_l3_unicast(rocker_port, trans, flags, - entry->index, - rocker_port->dev->dev_addr, - entry->eth_dst, - rocker_port->internal_vlan_id, - entry->ttl_check, - rocker_port->pport); - if (err) { - netdev_err(rocker_port->dev, - "Error (%d) L3 unicast group index %d\n", - err, entry->index); - goto err_out; - } - - if (adding || removing) { - group_id = ROCKER_GROUP_L3_UNICAST(entry->index); - err = rocker_flow_tbl_ucast4_routing(rocker_port, trans, - eth_type, ip_addr, - inet_make_mask(32), - priority, goto_tbl, - group_id, flags); - - if (err) - netdev_err(rocker_port->dev, - "Error (%d) /32 unicast route %pI4 group 0x%08x\n", - err, &entry->ip_addr, group_id); - } - -err_out: - if (!adding) - rocker_port_kfree(trans, entry); - - return err; -} - -static int rocker_port_ipv4_resolve(struct rocker_port *rocker_port, - struct switchdev_trans *trans, - __be32 ip_addr) -{ - struct net_device *dev = rocker_port->dev; - struct neighbour *n = __ipv4_neigh_lookup(dev, (__force u32)ip_addr); - int err = 0; - - if (!n) { - n = neigh_create(&arp_tbl, &ip_addr, dev); - if (IS_ERR(n)) - return PTR_ERR(n); - } - - /* If the neigh is already resolved, then go ahead and - * install the entry, otherwise start the ARP process to - * resolve the neigh.
- */ - - if (n->nud_state & NUD_VALID) - err = rocker_port_ipv4_neigh(rocker_port, trans, 0, - ip_addr, n->ha); - else - neigh_event_send(n, NULL); - - neigh_release(n); - return err; -} - -static int rocker_port_ipv4_nh(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags, - __be32 ip_addr, u32 *index) -{ - struct rocker *rocker = rocker_port->rocker; - struct rocker_neigh_tbl_entry *entry; - struct rocker_neigh_tbl_entry *found; - unsigned long lock_flags; - bool adding = !(flags & ROCKER_OP_FLAG_REMOVE); - bool updating; - bool removing; - bool resolved = true; - int err = 0; - - entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry)); - if (!entry) - return -ENOMEM; - - spin_lock_irqsave(&rocker->neigh_tbl_lock, lock_flags); - - found = rocker_neigh_tbl_find(rocker, ip_addr); - if (found) - *index = found->index; - - updating = found && adding; - removing = found && !adding; - adding = !found && adding; - - if (adding) { - entry->ip_addr = ip_addr; - entry->dev = rocker_port->dev; - _rocker_neigh_add(rocker, trans, entry); - *index = entry->index; - resolved = false; - } else if (removing) { - _rocker_neigh_del(trans, found); - } else if (updating) { - _rocker_neigh_update(found, trans, NULL, false); - resolved = !is_zero_ether_addr(found->eth_dst); - } else { - err = -ENOENT; - } - - spin_unlock_irqrestore(&rocker->neigh_tbl_lock, lock_flags); - - if (!adding) - rocker_port_kfree(trans, entry); - - if (err) - return err; - - /* Resolved means neigh ip_addr is resolved to neigh mac. */ - - if (!resolved) - err = rocker_port_ipv4_resolve(rocker_port, trans, ip_addr); - - return err; -} - -static int rocker_port_vlan_flood_group(struct rocker_port *rocker_port, - struct switchdev_trans *trans, - int flags, __be16 vlan_id) -{ - struct rocker_port *p; - const struct rocker *rocker = rocker_port->rocker; - u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0); - u32 *group_ids; - u8 group_count = 0; - int err = 0; - int i; - - group_ids = rocker_port_kcalloc(rocker_port, trans, flags, - rocker->port_count, sizeof(u32)); - if (!group_ids) - return -ENOMEM; - - /* Adjust the flood group for this VLAN. The flood group - * references an L2 interface group for each port in this - * VLAN. - */ - - for (i = 0; i < rocker->port_count; i++) { - p = rocker->ports[i]; - if (!p) - continue; - if (!rocker_port_is_bridged(p)) - continue; - if (test_bit(ntohs(vlan_id), p->vlan_bitmap)) { - group_ids[group_count++] = - ROCKER_GROUP_L2_INTERFACE(vlan_id, p->pport); - } - } - - /* If there are no bridged ports in this VLAN, we're done */ - if (group_count == 0) - goto no_ports_in_vlan; - - err = rocker_group_l2_flood(rocker_port, trans, flags, vlan_id, - group_count, group_ids, group_id); - if (err) - netdev_err(rocker_port->dev, - "Error (%d) port VLAN l2 flood group\n", err); - -no_ports_in_vlan: - rocker_port_kfree(trans, group_ids); - return err; -} - -static int rocker_port_vlan_l2_groups(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags, - __be16 vlan_id, bool pop_vlan) -{ - const struct rocker *rocker = rocker_port->rocker; - struct rocker_port *p; - bool adding = !(flags & ROCKER_OP_FLAG_REMOVE); - u32 out_pport; - int ref = 0; - int err; - int i; - - /* An L2 interface group for this port in this VLAN, but - * only when port STP state is LEARNING|FORWARDING. 
- */ - - if (rocker_port->stp_state == BR_STATE_LEARNING || - rocker_port->stp_state == BR_STATE_FORWARDING) { - out_pport = rocker_port->pport; - err = rocker_group_l2_interface(rocker_port, trans, flags, - vlan_id, out_pport, pop_vlan); - if (err) { - netdev_err(rocker_port->dev, - "Error (%d) port VLAN l2 group for pport %d\n", - err, out_pport); - return err; - } - } - - /* An L2 interface group for this VLAN to CPU port. - * Add when first port joins this VLAN and destroy when - * last port leaves this VLAN. - */ - - for (i = 0; i < rocker->port_count; i++) { - p = rocker->ports[i]; - if (p && test_bit(ntohs(vlan_id), p->vlan_bitmap)) - ref++; - } - - if ((!adding || ref != 1) && (adding || ref != 0)) - return 0; - - out_pport = 0; - err = rocker_group_l2_interface(rocker_port, trans, flags, - vlan_id, out_pport, pop_vlan); - if (err) { - netdev_err(rocker_port->dev, - "Error (%d) port VLAN l2 group for CPU port\n", err); - return err; - } - - return 0; -} - -static struct rocker_ctrl { - const u8 *eth_dst; - const u8 *eth_dst_mask; - __be16 eth_type; - bool acl; - bool bridge; - bool term; - bool copy_to_cpu; -} rocker_ctrls[] = { - [ROCKER_CTRL_LINK_LOCAL_MCAST] = { - /* pass link local multicast pkts up to CPU for filtering */ - .eth_dst = ll_mac, - .eth_dst_mask = ll_mask, - .acl = true, - }, - [ROCKER_CTRL_LOCAL_ARP] = { - /* pass local ARP pkts up to CPU */ - .eth_dst = zero_mac, - .eth_dst_mask = zero_mac, - .eth_type = htons(ETH_P_ARP), - .acl = true, - }, - [ROCKER_CTRL_IPV4_MCAST] = { - /* pass IPv4 mcast pkts up to CPU, RFC 1112 */ - .eth_dst = ipv4_mcast, - .eth_dst_mask = ipv4_mask, - .eth_type = htons(ETH_P_IP), - .term = true, - .copy_to_cpu = true, - }, - [ROCKER_CTRL_IPV6_MCAST] = { - /* pass IPv6 mcast pkts up to CPU, RFC 2464 */ - .eth_dst = ipv6_mcast, - .eth_dst_mask = ipv6_mask, - .eth_type = htons(ETH_P_IPV6), - .term = true, - .copy_to_cpu = true, - }, - [ROCKER_CTRL_DFLT_BRIDGING] = { - /* flood any pkts on vlan */ - .bridge = true, - .copy_to_cpu = true, - }, - [ROCKER_CTRL_DFLT_OVS] = { - /* pass all pkts up to CPU */ - .eth_dst = zero_mac, - .eth_dst_mask = zero_mac, - .acl = true, - }, -}; - -static int rocker_port_ctrl_vlan_acl(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags, - const struct rocker_ctrl *ctrl, __be16 vlan_id) -{ - u32 in_pport = rocker_port->pport; - u32 in_pport_mask = 0xffffffff; - u32 out_pport = 0; - const u8 *eth_src = NULL; - const u8 *eth_src_mask = NULL; - __be16 vlan_id_mask = htons(0xffff); - u8 ip_proto = 0; - u8 ip_proto_mask = 0; - u8 ip_tos = 0; - u8 ip_tos_mask = 0; - u32 group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport); - int err; - - err = rocker_flow_tbl_acl(rocker_port, trans, flags, - in_pport, in_pport_mask, - eth_src, eth_src_mask, - ctrl->eth_dst, ctrl->eth_dst_mask, - ctrl->eth_type, - vlan_id, vlan_id_mask, - ip_proto, ip_proto_mask, - ip_tos, ip_tos_mask, - group_id); - - if (err) - netdev_err(rocker_port->dev, "Error (%d) ctrl ACL\n", err); - - return err; -} - -static int rocker_port_ctrl_vlan_bridge(struct rocker_port *rocker_port, - struct switchdev_trans *trans, - int flags, - const struct rocker_ctrl *ctrl, - __be16 vlan_id) -{ - enum rocker_of_dpa_table_id goto_tbl = - ROCKER_OF_DPA_TABLE_ID_ACL_POLICY; - u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0); - u32 tunnel_id = 0; - int err; - - if (!rocker_port_is_bridged(rocker_port)) - return 0; - - err = rocker_flow_tbl_bridge(rocker_port, trans, flags, - ctrl->eth_dst, ctrl->eth_dst_mask, - vlan_id, tunnel_id, - goto_tbl, 
group_id, ctrl->copy_to_cpu); - - if (err) - netdev_err(rocker_port->dev, "Error (%d) ctrl FLOOD\n", err); - - return err; -} - -static int rocker_port_ctrl_vlan_term(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags, - const struct rocker_ctrl *ctrl, __be16 vlan_id) -{ - u32 in_pport_mask = 0xffffffff; - __be16 vlan_id_mask = htons(0xffff); - int err; - - if (ntohs(vlan_id) == 0) - vlan_id = rocker_port->internal_vlan_id; - - err = rocker_flow_tbl_term_mac(rocker_port, trans, - rocker_port->pport, in_pport_mask, - ctrl->eth_type, ctrl->eth_dst, - ctrl->eth_dst_mask, vlan_id, - vlan_id_mask, ctrl->copy_to_cpu, - flags); - - if (err) - netdev_err(rocker_port->dev, "Error (%d) ctrl term\n", err); - - return err; -} - -static int rocker_port_ctrl_vlan(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags, - const struct rocker_ctrl *ctrl, __be16 vlan_id) -{ - if (ctrl->acl) - return rocker_port_ctrl_vlan_acl(rocker_port, trans, flags, - ctrl, vlan_id); - if (ctrl->bridge) - return rocker_port_ctrl_vlan_bridge(rocker_port, trans, flags, - ctrl, vlan_id); - - if (ctrl->term) - return rocker_port_ctrl_vlan_term(rocker_port, trans, flags, - ctrl, vlan_id); - - return -EOPNOTSUPP; -} - -static int rocker_port_ctrl_vlan_add(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags, - __be16 vlan_id) -{ - int err = 0; - int i; - - for (i = 0; i < ROCKER_CTRL_MAX; i++) { - if (rocker_port->ctrls[i]) { - err = rocker_port_ctrl_vlan(rocker_port, trans, flags, - &rocker_ctrls[i], vlan_id); - if (err) - return err; - } - } - - return err; -} - -static int rocker_port_ctrl(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags, - const struct rocker_ctrl *ctrl) -{ - u16 vid; - int err = 0; - - for (vid = 1; vid < VLAN_N_VID; vid++) { - if (!test_bit(vid, rocker_port->vlan_bitmap)) - continue; - err = rocker_port_ctrl_vlan(rocker_port, trans, flags, - ctrl, htons(vid)); - if (err) - break; - } - - return err; -} - -static int rocker_port_vlan(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags, u16 vid) -{ - enum rocker_of_dpa_table_id goto_tbl = - ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC; - u32 in_pport = rocker_port->pport; - __be16 vlan_id = htons(vid); - __be16 vlan_id_mask = htons(0xffff); - __be16 internal_vlan_id; - bool untagged; - bool adding = !(flags & ROCKER_OP_FLAG_REMOVE); - int err; - - internal_vlan_id = rocker_port_vid_to_vlan(rocker_port, vid, &untagged); - - if (adding && test_bit(ntohs(internal_vlan_id), - rocker_port->vlan_bitmap)) - return 0; /* already added */ - else if (!adding && !test_bit(ntohs(internal_vlan_id), - rocker_port->vlan_bitmap)) - return 0; /* already removed */ - - change_bit(ntohs(internal_vlan_id), rocker_port->vlan_bitmap); - - if (adding) { - err = rocker_port_ctrl_vlan_add(rocker_port, trans, flags, - internal_vlan_id); - if (err) { - netdev_err(rocker_port->dev, - "Error (%d) port ctrl vlan add\n", err); - goto err_out; - } - } - - err = rocker_port_vlan_l2_groups(rocker_port, trans, flags, - internal_vlan_id, untagged); - if (err) { - netdev_err(rocker_port->dev, - "Error (%d) port VLAN l2 groups\n", err); - goto err_out; - } - - err = rocker_port_vlan_flood_group(rocker_port, trans, flags, - internal_vlan_id); - if (err) { - netdev_err(rocker_port->dev, - "Error (%d) port VLAN l2 flood group\n", err); - goto err_out; - } - - err = rocker_flow_tbl_vlan(rocker_port, trans, flags, - in_pport, vlan_id, vlan_id_mask, - goto_tbl, untagged, 
internal_vlan_id); - if (err) - netdev_err(rocker_port->dev, - "Error (%d) port VLAN table\n", err); - -err_out: - if (switchdev_trans_ph_prepare(trans)) - change_bit(ntohs(internal_vlan_id), rocker_port->vlan_bitmap); - - return err; -} - -static int rocker_port_ig_tbl(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags) -{ - enum rocker_of_dpa_table_id goto_tbl; - u32 in_pport; - u32 in_pport_mask; - int err; - - /* Normal Ethernet Frames. Matches pkts from any local physical - * ports. Goto VLAN tbl. - */ - - in_pport = 0; - in_pport_mask = 0xffff0000; - goto_tbl = ROCKER_OF_DPA_TABLE_ID_VLAN; - - err = rocker_flow_tbl_ig_port(rocker_port, trans, flags, - in_pport, in_pport_mask, - goto_tbl); - if (err) - netdev_err(rocker_port->dev, - "Error (%d) ingress port table entry\n", err); - - return err; -} - -struct rocker_fdb_learn_work { - struct work_struct work; - struct rocker_port *rocker_port; - struct switchdev_trans *trans; - int flags; - u8 addr[ETH_ALEN]; - u16 vid; -}; - -static void rocker_port_fdb_learn_work(struct work_struct *work) -{ - const struct rocker_fdb_learn_work *lw = - container_of(work, struct rocker_fdb_learn_work, work); - bool removing = (lw->flags & ROCKER_OP_FLAG_REMOVE); - bool learned = (lw->flags & ROCKER_OP_FLAG_LEARNED); - struct switchdev_notifier_fdb_info info; - - info.addr = lw->addr; - info.vid = lw->vid; - - if (learned && removing) - call_switchdev_notifiers(SWITCHDEV_FDB_DEL, - lw->rocker_port->dev, &info.info); - else if (learned && !removing) - call_switchdev_notifiers(SWITCHDEV_FDB_ADD, - lw->rocker_port->dev, &info.info); - - rocker_port_kfree(lw->trans, work); -} - -static int rocker_port_fdb_learn(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags, - const u8 *addr, __be16 vlan_id) -{ - struct rocker_fdb_learn_work *lw; - enum rocker_of_dpa_table_id goto_tbl = - ROCKER_OF_DPA_TABLE_ID_ACL_POLICY; - u32 out_pport = rocker_port->pport; - u32 tunnel_id = 0; - u32 group_id = ROCKER_GROUP_NONE; - bool syncing = !!(rocker_port->brport_flags & BR_LEARNING_SYNC); - bool copy_to_cpu = false; - int err; - - if (rocker_port_is_bridged(rocker_port)) - group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport); - - if (!(flags & ROCKER_OP_FLAG_REFRESH)) { - err = rocker_flow_tbl_bridge(rocker_port, trans, flags, addr, - NULL, vlan_id, tunnel_id, goto_tbl, - group_id, copy_to_cpu); - if (err) - return err; - } - - if (!syncing) - return 0; - - if (!rocker_port_is_bridged(rocker_port)) - return 0; - - lw = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*lw)); - if (!lw) - return -ENOMEM; - - INIT_WORK(&lw->work, rocker_port_fdb_learn_work); - - lw->rocker_port = rocker_port; - lw->trans = trans; - lw->flags = flags; - ether_addr_copy(lw->addr, addr); - lw->vid = rocker_port_vlan_to_vid(rocker_port, vlan_id); - - if (switchdev_trans_ph_prepare(trans)) - rocker_port_kfree(trans, lw); - else - schedule_work(&lw->work); - - return 0; -} - -static struct rocker_fdb_tbl_entry * -rocker_fdb_tbl_find(const struct rocker *rocker, - const struct rocker_fdb_tbl_entry *match) -{ - struct rocker_fdb_tbl_entry *found; - - hash_for_each_possible(rocker->fdb_tbl, found, entry, match->key_crc32) - if (memcmp(&found->key, &match->key, sizeof(found->key)) == 0) - return found; - - return NULL; -} - -static int rocker_port_fdb(struct rocker_port *rocker_port, - struct switchdev_trans *trans, - const unsigned char *addr, - __be16 vlan_id, int flags) -{ - struct rocker *rocker = rocker_port->rocker; - struct 
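rocker_port_fdb_learn() above runs in contexts that cannot sleep, so rather than calling the switchdev FDB notifiers directly it packages the learned address into a heap-allocated rocker_fdb_learn_work item and defers the notification with schedule_work(); the handler then frees the item. A user-space model of that hand-off, with a one-slot stand-in for the workqueue (assumed names):

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct learn_work {
	void (*fn)(struct learn_work *);
	unsigned char addr[6];
	unsigned short vid;
	int removing;
};

static struct learn_work *pending;	/* one-slot "workqueue" */

static void learn_handler(struct learn_work *w)
{
	/* the driver calls SWITCHDEV_FDB_ADD/DEL notifiers here */
	printf("%s fdb %02x:..:%02x vid %u\n",
	       w->removing ? "del" : "add", w->addr[0], w->addr[5], w->vid);
}

static void schedule_learn(const unsigned char *addr, unsigned short vid,
			   int removing)
{
	struct learn_work *w = malloc(sizeof(*w));

	w->fn = learn_handler;
	memcpy(w->addr, addr, 6);
	w->vid = vid;
	w->removing = removing;
	pending = w;	/* real code: schedule_work(&lw->work) */
}

static void run_pending(void)	/* stands in for the workqueue thread */
{
	if (pending) {
		pending->fn(pending);
		free(pending);
		pending = NULL;
	}
}

int main(void)
{
	unsigned char mac[6] = { 0x52, 0x54, 0x00, 0x12, 0x34, 0x56 };

	schedule_learn(mac, 10, 0);
	run_pending();
	return 0;
}
```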
rocker_fdb_tbl_entry *fdb; - struct rocker_fdb_tbl_entry *found; - bool removing = (flags & ROCKER_OP_FLAG_REMOVE); - unsigned long lock_flags; - - fdb = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*fdb)); - if (!fdb) - return -ENOMEM; - - fdb->learned = (flags & ROCKER_OP_FLAG_LEARNED); - fdb->touched = jiffies; - fdb->key.rocker_port = rocker_port; - ether_addr_copy(fdb->key.addr, addr); - fdb->key.vlan_id = vlan_id; - fdb->key_crc32 = crc32(~0, &fdb->key, sizeof(fdb->key)); - - spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags); - - found = rocker_fdb_tbl_find(rocker, fdb); - - if (found) { - found->touched = jiffies; - if (removing) { - rocker_port_kfree(trans, fdb); - if (!switchdev_trans_ph_prepare(trans)) - hash_del(&found->entry); - } - } else if (!removing) { - if (!switchdev_trans_ph_prepare(trans)) - hash_add(rocker->fdb_tbl, &fdb->entry, - fdb->key_crc32); - } - - spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags); - - /* Check if adding and already exists, or removing and can't find */ - if (!found != !removing) { - rocker_port_kfree(trans, fdb); - if (!found && removing) - return 0; - /* Refreshing existing to update aging timers */ - flags |= ROCKER_OP_FLAG_REFRESH; - } - - return rocker_port_fdb_learn(rocker_port, trans, flags, addr, vlan_id); -} - -static int rocker_port_fdb_flush(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags) -{ - struct rocker *rocker = rocker_port->rocker; - struct rocker_fdb_tbl_entry *found; - unsigned long lock_flags; - struct hlist_node *tmp; - int bkt; - int err = 0; - - if (rocker_port->stp_state == BR_STATE_LEARNING || - rocker_port->stp_state == BR_STATE_FORWARDING) - return 0; - - flags |= ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_REMOVE; - - spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags); - - hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) { - if (found->key.rocker_port != rocker_port) - continue; - if (!found->learned) - continue; - err = rocker_port_fdb_learn(rocker_port, trans, flags, - found->key.addr, - found->key.vlan_id); - if (err) - goto err_out; - if (!switchdev_trans_ph_prepare(trans)) - hash_del(&found->entry); - } - -err_out: - spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags); - - return err; -} - -static void rocker_fdb_cleanup(unsigned long data) -{ - struct rocker *rocker = (struct rocker *)data; - struct rocker_port *rocker_port; - struct rocker_fdb_tbl_entry *entry; - struct hlist_node *tmp; - unsigned long next_timer = jiffies + BR_MIN_AGEING_TIME; - unsigned long expires; - unsigned long lock_flags; - int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_REMOVE | - ROCKER_OP_FLAG_LEARNED; - int bkt; - - spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags); - - hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, entry, entry) { - if (!entry->learned) - continue; - rocker_port = entry->key.rocker_port; - expires = entry->touched + rocker_port->ageing_time; - if (time_before_eq(expires, jiffies)) { - rocker_port_fdb_learn(rocker_port, NULL, - flags, entry->key.addr, - entry->key.vlan_id); - hash_del(&entry->entry); - } else if (time_before(expires, next_timer)) { - next_timer = expires; - } - } - - spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags); - - mod_timer(&rocker->fdb_cleanup_timer, round_jiffies_up(next_timer)); -} - -static int rocker_port_router_mac(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags, - __be16 vlan_id) -{ - u32 in_pport_mask = 0xffffffff; - __be16 eth_type; - const u8 *dst_mac_mask = ff_mac; - __be16 
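
rocker_fdb_tbl_find() and rocker_port_fdb() above key the FDB hash table on a crc32 of the whole key struct, with memcmp() resolving bucket collisions; this only works because entries come from kzalloc(), so struct padding hashes consistently. A reduced sketch of the same lookup pattern (all example_* names are ours, and the 16-bit table size is an assumption):

#include <linux/crc32.h>
#include <linux/hashtable.h>
#include <linux/if_ether.h>
#include <linux/string.h>

struct example_fdb_key {
	unsigned char addr[ETH_ALEN];
	__be16 vlan_id;
};

struct example_fdb_entry {
	struct hlist_node entry;
	u32 key_crc32;
	struct example_fdb_key key;	/* must be zero-initialized */
};

static DEFINE_HASHTABLE(example_fdb_tbl, 16);

static struct example_fdb_entry *
example_fdb_find(const struct example_fdb_key *key)
{
	u32 crc = crc32(~0, key, sizeof(*key));
	struct example_fdb_entry *found;

	/* crc selects the bucket; memcmp() resolves crc collisions */
	hash_for_each_possible(example_fdb_tbl, found, entry, crc)
		if (!memcmp(&found->key, key, sizeof(found->key)))
			return found;
	return NULL;
}
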
vlan_id_mask = htons(0xffff); - bool copy_to_cpu = false; - int err; - - if (ntohs(vlan_id) == 0) - vlan_id = rocker_port->internal_vlan_id; - - eth_type = htons(ETH_P_IP); - err = rocker_flow_tbl_term_mac(rocker_port, trans, - rocker_port->pport, in_pport_mask, - eth_type, rocker_port->dev->dev_addr, - dst_mac_mask, vlan_id, vlan_id_mask, - copy_to_cpu, flags); - if (err) - return err; - - eth_type = htons(ETH_P_IPV6); - err = rocker_flow_tbl_term_mac(rocker_port, trans, - rocker_port->pport, in_pport_mask, - eth_type, rocker_port->dev->dev_addr, - dst_mac_mask, vlan_id, vlan_id_mask, - copy_to_cpu, flags); - - return err; -} - -static int rocker_port_fwding(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags) -{ - bool pop_vlan; - u32 out_pport; - __be16 vlan_id; - u16 vid; - int err; - - /* Port will be forwarding-enabled if its STP state is LEARNING - * or FORWARDING. Traffic from CPU can still egress, regardless of - * port STP state. Use L2 interface group on port VLANs as a way - * to toggle port forwarding: if forwarding is disabled, L2 - * interface group will not exist. - */ - - if (rocker_port->stp_state != BR_STATE_LEARNING && - rocker_port->stp_state != BR_STATE_FORWARDING) - flags |= ROCKER_OP_FLAG_REMOVE; - - out_pport = rocker_port->pport; - for (vid = 1; vid < VLAN_N_VID; vid++) { - if (!test_bit(vid, rocker_port->vlan_bitmap)) - continue; - vlan_id = htons(vid); - pop_vlan = rocker_vlan_id_is_internal(vlan_id); - err = rocker_group_l2_interface(rocker_port, trans, flags, - vlan_id, out_pport, pop_vlan); - if (err) { - netdev_err(rocker_port->dev, - "Error (%d) port VLAN l2 group for pport %d\n", - err, out_pport); - return err; - } - } - - return 0; -} - -static int rocker_port_stp_update(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags, - u8 state) -{ - bool want[ROCKER_CTRL_MAX] = { 0, }; - bool prev_ctrls[ROCKER_CTRL_MAX]; - u8 uninitialized_var(prev_state); - int err; - int i; - - if (switchdev_trans_ph_prepare(trans)) { - memcpy(prev_ctrls, rocker_port->ctrls, sizeof(prev_ctrls)); - prev_state = rocker_port->stp_state; - } - - if (rocker_port->stp_state == state) - return 0; - - rocker_port->stp_state = state; - - switch (state) { - case BR_STATE_DISABLED: - /* port is completely disabled */ - break; - case BR_STATE_LISTENING: - case BR_STATE_BLOCKING: - want[ROCKER_CTRL_LINK_LOCAL_MCAST] = true; - break; - case BR_STATE_LEARNING: - case BR_STATE_FORWARDING: - if (!rocker_port_is_ovsed(rocker_port)) - want[ROCKER_CTRL_LINK_LOCAL_MCAST] = true; - want[ROCKER_CTRL_IPV4_MCAST] = true; - want[ROCKER_CTRL_IPV6_MCAST] = true; - if (rocker_port_is_bridged(rocker_port)) - want[ROCKER_CTRL_DFLT_BRIDGING] = true; - else if (rocker_port_is_ovsed(rocker_port)) - want[ROCKER_CTRL_DFLT_OVS] = true; - else - want[ROCKER_CTRL_LOCAL_ARP] = true; - break; - } - - for (i = 0; i < ROCKER_CTRL_MAX; i++) { - if (want[i] != rocker_port->ctrls[i]) { - int ctrl_flags = flags | - (want[i] ? 
0 : ROCKER_OP_FLAG_REMOVE); - err = rocker_port_ctrl(rocker_port, trans, ctrl_flags, - &rocker_ctrls[i]); - if (err) - goto err_out; - rocker_port->ctrls[i] = want[i]; - } - } - - err = rocker_port_fdb_flush(rocker_port, trans, flags); - if (err) - goto err_out; - - err = rocker_port_fwding(rocker_port, trans, flags); - -err_out: - if (switchdev_trans_ph_prepare(trans)) { - memcpy(rocker_port->ctrls, prev_ctrls, sizeof(prev_ctrls)); - rocker_port->stp_state = prev_state; - } - - return err; -} - -static int rocker_port_fwd_enable(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags) -{ - if (rocker_port_is_bridged(rocker_port)) - /* bridge STP will enable port */ - return 0; - - /* port is not bridged, so simulate going to FORWARDING state */ - return rocker_port_stp_update(rocker_port, trans, flags, - BR_STATE_FORWARDING); -} - -static int rocker_port_fwd_disable(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags) -{ - if (rocker_port_is_bridged(rocker_port)) - /* bridge STP will disable port */ - return 0; - - /* port is not bridged, so simulate going to DISABLED state */ - return rocker_port_stp_update(rocker_port, trans, flags, - BR_STATE_DISABLED); -} - -static struct rocker_internal_vlan_tbl_entry * -rocker_internal_vlan_tbl_find(const struct rocker *rocker, int ifindex) -{ - struct rocker_internal_vlan_tbl_entry *found; - - hash_for_each_possible(rocker->internal_vlan_tbl, found, - entry, ifindex) { - if (found->ifindex == ifindex) - return found; - } - - return NULL; -} - -static __be16 rocker_port_internal_vlan_id_get(struct rocker_port *rocker_port, - int ifindex) -{ - struct rocker *rocker = rocker_port->rocker; - struct rocker_internal_vlan_tbl_entry *entry; - struct rocker_internal_vlan_tbl_entry *found; - unsigned long lock_flags; - int i; - - entry = kzalloc(sizeof(*entry), GFP_KERNEL); - if (!entry) - return 0; - - entry->ifindex = ifindex; - - spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, lock_flags); - - found = rocker_internal_vlan_tbl_find(rocker, ifindex); - if (found) { - kfree(entry); - goto found; - } - - found = entry; - hash_add(rocker->internal_vlan_tbl, &found->entry, found->ifindex); - - for (i = 0; i < ROCKER_N_INTERNAL_VLANS; i++) { - if (test_and_set_bit(i, rocker->internal_vlan_bitmap)) - continue; - found->vlan_id = htons(ROCKER_INTERNAL_VLAN_ID_BASE + i); - goto found; - } - - netdev_err(rocker_port->dev, "Out of internal VLAN IDs\n"); - -found: - found->ref_count++; - spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, lock_flags); - - return found->vlan_id; -} - -static void -rocker_port_internal_vlan_id_put(const struct rocker_port *rocker_port, - int ifindex) -{ - struct rocker *rocker = rocker_port->rocker; - struct rocker_internal_vlan_tbl_entry *found; - unsigned long lock_flags; - unsigned long bit; - - spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, lock_flags); - - found = rocker_internal_vlan_tbl_find(rocker, ifindex); - if (!found) { - netdev_err(rocker_port->dev, - "ifindex (%d) not found in internal VLAN tbl\n", - ifindex); - goto not_found; - } - - if (--found->ref_count <= 0) { - bit = ntohs(found->vlan_id) - ROCKER_INTERNAL_VLAN_ID_BASE; - clear_bit(bit, rocker->internal_vlan_bitmap); - hash_del(&found->entry); - kfree(found); - } - -not_found: - spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, lock_flags); -} - -static int rocker_port_fib_ipv4(struct rocker_port *rocker_port, - struct switchdev_trans *trans, __be32 dst, - int dst_len, const struct fib_info *fi, - u32 
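
rocker_port_internal_vlan_id_get()/..._put() above implement a small ref-counted ID pool: a per-ifindex hash entry plus a bitmap scan for the first free internal VLAN. The allocation core reduces to a find-first-zero-bit pattern; a sketch with made-up pool size and base VID (the real constants live in the rocker header), and with locking left to the caller as in the original:

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/types.h>

#define EX_N_INTERNAL_VLANS	255	/* hypothetical pool size */
#define EX_INTERNAL_VLAN_BASE	0x0f00	/* hypothetical first VID */

static DECLARE_BITMAP(ex_internal_vlan_bitmap, EX_N_INTERNAL_VLANS);

/* callers must serialize, as rocker does with internal_vlan_tbl_lock;
 * returns an allocated VID, or 0 when the pool is exhausted
 */
static u16 ex_internal_vlan_get(void)
{
	unsigned long bit;

	bit = find_first_zero_bit(ex_internal_vlan_bitmap,
				  EX_N_INTERNAL_VLANS);
	if (bit >= EX_N_INTERNAL_VLANS)
		return 0;
	set_bit(bit, ex_internal_vlan_bitmap);
	return EX_INTERNAL_VLAN_BASE + bit;
}

static void ex_internal_vlan_put(u16 vid)
{
	clear_bit(vid - EX_INTERNAL_VLAN_BASE, ex_internal_vlan_bitmap);
}
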
tb_id, int flags) -{ - const struct fib_nh *nh; - __be16 eth_type = htons(ETH_P_IP); - __be32 dst_mask = inet_make_mask(dst_len); - __be16 internal_vlan_id = rocker_port->internal_vlan_id; - u32 priority = fi->fib_priority; - enum rocker_of_dpa_table_id goto_tbl = - ROCKER_OF_DPA_TABLE_ID_ACL_POLICY; - u32 group_id; - bool nh_on_port; - bool has_gw; - u32 index; - int err; - - /* XXX support ECMP */ - - nh = fi->fib_nh; - nh_on_port = (fi->fib_dev == rocker_port->dev); - has_gw = !!nh->nh_gw; - - if (has_gw && nh_on_port) { - err = rocker_port_ipv4_nh(rocker_port, trans, flags, - nh->nh_gw, &index); - if (err) - return err; - - group_id = ROCKER_GROUP_L3_UNICAST(index); - } else { - /* Send to CPU for processing */ - group_id = ROCKER_GROUP_L2_INTERFACE(internal_vlan_id, 0); - } - - err = rocker_flow_tbl_ucast4_routing(rocker_port, trans, eth_type, dst, - dst_mask, priority, goto_tbl, - group_id, flags); - if (err) - netdev_err(rocker_port->dev, "Error (%d) IPv4 route %pI4\n", - err, &dst); - - return err; -} - -/***************** - * Net device ops - *****************/ - -static int rocker_port_open(struct net_device *dev) -{ - struct rocker_port *rocker_port = netdev_priv(dev); - int err; - - err = rocker_port_dma_rings_init(rocker_port); - if (err) - return err; - - err = request_irq(rocker_msix_tx_vector(rocker_port), - rocker_tx_irq_handler, 0, - rocker_driver_name, rocker_port); - if (err) { - netdev_err(rocker_port->dev, "cannot assign tx irq\n"); - goto err_request_tx_irq; - } - - err = request_irq(rocker_msix_rx_vector(rocker_port), - rocker_rx_irq_handler, 0, - rocker_driver_name, rocker_port); - if (err) { - netdev_err(rocker_port->dev, "cannot assign rx irq\n"); - goto err_request_rx_irq; - } - - err = rocker_port_fwd_enable(rocker_port, NULL, 0); - if (err) - goto err_fwd_enable; - - napi_enable(&rocker_port->napi_tx); - napi_enable(&rocker_port->napi_rx); - if (!dev->proto_down) - rocker_port_set_enable(rocker_port, true); - netif_start_queue(dev); - return 0; - -err_fwd_enable: - free_irq(rocker_msix_rx_vector(rocker_port), rocker_port); -err_request_rx_irq: - free_irq(rocker_msix_tx_vector(rocker_port), rocker_port); -err_request_tx_irq: - rocker_port_dma_rings_fini(rocker_port); - return err; -} - -static int rocker_port_stop(struct net_device *dev) -{ - struct rocker_port *rocker_port = netdev_priv(dev); - - netif_stop_queue(dev); - rocker_port_set_enable(rocker_port, false); - napi_disable(&rocker_port->napi_rx); - napi_disable(&rocker_port->napi_tx); - rocker_port_fwd_disable(rocker_port, NULL, - ROCKER_OP_FLAG_NOWAIT); - free_irq(rocker_msix_rx_vector(rocker_port), rocker_port); - free_irq(rocker_msix_tx_vector(rocker_port), rocker_port); - rocker_port_dma_rings_fini(rocker_port); - - return 0; -} - -static void rocker_tx_desc_frags_unmap(const struct rocker_port *rocker_port, - const struct rocker_desc_info *desc_info) -{ - const struct rocker *rocker = rocker_port->rocker; - struct pci_dev *pdev = rocker->pdev; - const struct rocker_tlv *attrs[ROCKER_TLV_TX_MAX + 1]; - struct rocker_tlv *attr; - int rem; - - rocker_tlv_parse_desc(attrs, ROCKER_TLV_TX_MAX, desc_info); - if (!attrs[ROCKER_TLV_TX_FRAGS]) - return; - rocker_tlv_for_each_nested(attr, attrs[ROCKER_TLV_TX_FRAGS], rem) { - const struct rocker_tlv *frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_MAX + 1]; - dma_addr_t dma_handle; - size_t len; - - if (rocker_tlv_type(attr) != ROCKER_TLV_TX_FRAG) - continue; - rocker_tlv_parse_nested(frag_attrs, ROCKER_TLV_TX_FRAG_ATTR_MAX, - attr); - if 
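
rocker_port_open()/rocker_port_stop() above follow the usual ndo bring-up discipline: every setup step has a goto unwind label, and stop releases resources in exactly the reverse order (stop queue, disable hardware, disable NAPI, free IRQs, tear down rings). The skeleton, with hypothetical example_* steps and the NAPI enable/disable elided:

#include <linux/netdevice.h>

/* hypothetical helpers; each init pairs with a fini/free undo */
int example_rings_init(struct net_device *dev);
void example_rings_fini(struct net_device *dev);
int example_request_irqs(struct net_device *dev);
void example_free_irqs(struct net_device *dev);

static int example_open(struct net_device *dev)
{
	int err;

	err = example_rings_init(dev);
	if (err)
		return err;

	err = example_request_irqs(dev);
	if (err)
		goto err_request_irqs;

	netif_start_queue(dev);
	return 0;

err_request_irqs:
	example_rings_fini(dev);
	return err;
}

static int example_stop(struct net_device *dev)
{
	/* strict reverse of example_open() */
	netif_stop_queue(dev);
	example_free_irqs(dev);
	example_rings_fini(dev);
	return 0;
}
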
(!frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_ADDR] || - !frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_LEN]) - continue; - dma_handle = rocker_tlv_get_u64(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_ADDR]); - len = rocker_tlv_get_u16(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_LEN]); - pci_unmap_single(pdev, dma_handle, len, DMA_TO_DEVICE); - } -} - -static int rocker_tx_desc_frag_map_put(const struct rocker_port *rocker_port, - struct rocker_desc_info *desc_info, - char *buf, size_t buf_len) -{ - const struct rocker *rocker = rocker_port->rocker; - struct pci_dev *pdev = rocker->pdev; - dma_addr_t dma_handle; - struct rocker_tlv *frag; - - dma_handle = pci_map_single(pdev, buf, buf_len, DMA_TO_DEVICE); - if (unlikely(pci_dma_mapping_error(pdev, dma_handle))) { - if (net_ratelimit()) - netdev_err(rocker_port->dev, "failed to dma map tx frag\n"); - return -EIO; - } - frag = rocker_tlv_nest_start(desc_info, ROCKER_TLV_TX_FRAG); - if (!frag) - goto unmap_frag; - if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_TX_FRAG_ATTR_ADDR, - dma_handle)) - goto nest_cancel; - if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_TX_FRAG_ATTR_LEN, - buf_len)) - goto nest_cancel; - rocker_tlv_nest_end(desc_info, frag); - return 0; - -nest_cancel: - rocker_tlv_nest_cancel(desc_info, frag); -unmap_frag: - pci_unmap_single(pdev, dma_handle, buf_len, DMA_TO_DEVICE); - return -EMSGSIZE; -} - -static netdev_tx_t rocker_port_xmit(struct sk_buff *skb, struct net_device *dev) -{ - struct rocker_port *rocker_port = netdev_priv(dev); - struct rocker *rocker = rocker_port->rocker; - struct rocker_desc_info *desc_info; - struct rocker_tlv *frags; - int i; - int err; - - desc_info = rocker_desc_head_get(&rocker_port->tx_ring); - if (unlikely(!desc_info)) { - if (net_ratelimit()) - netdev_err(dev, "tx ring full when queue awake\n"); - return NETDEV_TX_BUSY; - } - - rocker_desc_cookie_ptr_set(desc_info, skb); - - frags = rocker_tlv_nest_start(desc_info, ROCKER_TLV_TX_FRAGS); - if (!frags) - goto out; - err = rocker_tx_desc_frag_map_put(rocker_port, desc_info, - skb->data, skb_headlen(skb)); - if (err) - goto nest_cancel; - if (skb_shinfo(skb)->nr_frags > ROCKER_TX_FRAGS_MAX) { - err = skb_linearize(skb); - if (err) - goto unmap_frags; - } - - for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { - const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; - - err = rocker_tx_desc_frag_map_put(rocker_port, desc_info, - skb_frag_address(frag), - skb_frag_size(frag)); - if (err) - goto unmap_frags; - } - rocker_tlv_nest_end(desc_info, frags); - - rocker_desc_gen_clear(desc_info); - rocker_desc_head_set(rocker, &rocker_port->tx_ring, desc_info); - - desc_info = rocker_desc_head_get(&rocker_port->tx_ring); - if (!desc_info) - netif_stop_queue(dev); - - return NETDEV_TX_OK; - -unmap_frags: - rocker_tx_desc_frags_unmap(rocker_port, desc_info); -nest_cancel: - rocker_tlv_nest_cancel(desc_info, frags); -out: - dev_kfree_skb(skb); - dev->stats.tx_dropped++; - - return NETDEV_TX_OK; -} - -static int rocker_port_set_mac_address(struct net_device *dev, void *p) -{ - struct sockaddr *addr = p; - struct rocker_port *rocker_port = netdev_priv(dev); - int err; - - if (!is_valid_ether_addr(addr->sa_data)) - return -EADDRNOTAVAIL; - - err = rocker_cmd_set_port_settings_macaddr(rocker_port, addr->sa_data); - if (err) - return err; - memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); - return 0; -} - -static int rocker_port_change_mtu(struct net_device *dev, int new_mtu) -{ - struct rocker_port *rocker_port = netdev_priv(dev); - int running = netif_running(dev); - int err; - -#define 
ROCKER_PORT_MIN_MTU 68 -#define ROCKER_PORT_MAX_MTU 9000 - - if (new_mtu < ROCKER_PORT_MIN_MTU || new_mtu > ROCKER_PORT_MAX_MTU) - return -EINVAL; - - if (running) - rocker_port_stop(dev); - - netdev_info(dev, "MTU change from %d to %d\n", dev->mtu, new_mtu); - dev->mtu = new_mtu; - - err = rocker_cmd_set_port_settings_mtu(rocker_port, new_mtu); - if (err) - return err; - - if (running) - err = rocker_port_open(dev); - - return err; -} - -static int rocker_port_get_phys_port_name(struct net_device *dev, - char *buf, size_t len) -{ - struct rocker_port *rocker_port = netdev_priv(dev); - struct port_name name = { .buf = buf, .len = len }; - int err; - - err = rocker_cmd_exec(rocker_port, NULL, 0, - rocker_cmd_get_port_settings_prep, NULL, - rocker_cmd_get_port_settings_phys_name_proc, - &name); - - return err ? -EOPNOTSUPP : 0; -} - -static int rocker_port_change_proto_down(struct net_device *dev, - bool proto_down) -{ - struct rocker_port *rocker_port = netdev_priv(dev); - - if (rocker_port->dev->flags & IFF_UP) - rocker_port_set_enable(rocker_port, !proto_down); - rocker_port->dev->proto_down = proto_down; - return 0; -} - -static void rocker_port_neigh_destroy(struct neighbour *n) -{ - struct rocker_port *rocker_port = netdev_priv(n->dev); - int flags = ROCKER_OP_FLAG_REMOVE | ROCKER_OP_FLAG_NOWAIT; - __be32 ip_addr = *(__be32 *)n->primary_key; - - rocker_port_ipv4_neigh(rocker_port, NULL, - flags, ip_addr, n->ha); -} - -static const struct net_device_ops rocker_port_netdev_ops = { - .ndo_open = rocker_port_open, - .ndo_stop = rocker_port_stop, - .ndo_start_xmit = rocker_port_xmit, - .ndo_set_mac_address = rocker_port_set_mac_address, - .ndo_change_mtu = rocker_port_change_mtu, - .ndo_bridge_getlink = switchdev_port_bridge_getlink, - .ndo_bridge_setlink = switchdev_port_bridge_setlink, - .ndo_bridge_dellink = switchdev_port_bridge_dellink, - .ndo_fdb_add = switchdev_port_fdb_add, - .ndo_fdb_del = switchdev_port_fdb_del, - .ndo_fdb_dump = switchdev_port_fdb_dump, - .ndo_get_phys_port_name = rocker_port_get_phys_port_name, - .ndo_change_proto_down = rocker_port_change_proto_down, - .ndo_neigh_destroy = rocker_port_neigh_destroy, -}; - -/******************** - * swdev interface - ********************/ - -static int rocker_port_attr_get(struct net_device *dev, - struct switchdev_attr *attr) -{ - const struct rocker_port *rocker_port = netdev_priv(dev); - const struct rocker *rocker = rocker_port->rocker; - - switch (attr->id) { - case SWITCHDEV_ATTR_ID_PORT_PARENT_ID: - attr->u.ppid.id_len = sizeof(rocker->hw.id); - memcpy(&attr->u.ppid.id, &rocker->hw.id, attr->u.ppid.id_len); - break; - case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS: - attr->u.brport_flags = rocker_port->brport_flags; - break; - default: - return -EOPNOTSUPP; - } - - return 0; -} - -static int rocker_port_brport_flags_set(struct rocker_port *rocker_port, - struct switchdev_trans *trans, - unsigned long brport_flags) -{ - unsigned long orig_flags; - int err = 0; - - orig_flags = rocker_port->brport_flags; - rocker_port->brport_flags = brport_flags; - if ((orig_flags ^ rocker_port->brport_flags) & BR_LEARNING) - err = rocker_port_set_learning(rocker_port, trans); - - if (switchdev_trans_ph_prepare(trans)) - rocker_port->brport_flags = orig_flags; - - return err; -} - -static int rocker_port_bridge_ageing_time(struct rocker_port *rocker_port, - struct switchdev_trans *trans, - u32 ageing_time) -{ - if (!switchdev_trans_ph_prepare(trans)) { - rocker_port->ageing_time = clock_t_to_jiffies(ageing_time); - 
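
rocker_port_change_mtu() above bounds-checks against local min/max defines and brackets the hardware MTU command with a stop/open cycle when the interface is running. The shape of that ndo, reusing the hypothetical example_open()/example_stop() from the earlier sketch and a hypothetical example_hw_set_mtu() command:

#include <linux/errno.h>
#include <linux/netdevice.h>

#define EXAMPLE_MIN_MTU	68
#define EXAMPLE_MAX_MTU	9000

int example_hw_set_mtu(struct net_device *dev, int mtu);	/* hypothetical */

static int example_change_mtu(struct net_device *dev, int new_mtu)
{
	bool running = netif_running(dev);
	int err;

	if (new_mtu < EXAMPLE_MIN_MTU || new_mtu > EXAMPLE_MAX_MTU)
		return -EINVAL;

	if (running)
		example_stop(dev);

	dev->mtu = new_mtu;
	err = example_hw_set_mtu(dev, new_mtu);
	if (err)
		return err;	/* port stays down on failure, as in the original */

	if (running)
		err = example_open(dev);
	return err;
}
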
mod_timer(&rocker_port->rocker->fdb_cleanup_timer, jiffies); - } - - return 0; -} - -static int rocker_port_attr_set(struct net_device *dev, - const struct switchdev_attr *attr, - struct switchdev_trans *trans) -{ - struct rocker_port *rocker_port = netdev_priv(dev); - int err = 0; - - switch (attr->id) { - case SWITCHDEV_ATTR_ID_PORT_STP_STATE: - err = rocker_port_stp_update(rocker_port, trans, 0, - attr->u.stp_state); - break; - case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS: - err = rocker_port_brport_flags_set(rocker_port, trans, - attr->u.brport_flags); - break; - case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME: - err = rocker_port_bridge_ageing_time(rocker_port, trans, - attr->u.ageing_time); - break; - default: - err = -EOPNOTSUPP; - break; - } - - return err; -} - -static int rocker_port_vlan_add(struct rocker_port *rocker_port, - struct switchdev_trans *trans, - u16 vid, u16 flags) -{ - int err; - - /* XXX deal with flags for PVID and untagged */ - - err = rocker_port_vlan(rocker_port, trans, 0, vid); - if (err) - return err; - - err = rocker_port_router_mac(rocker_port, trans, 0, htons(vid)); - if (err) - rocker_port_vlan(rocker_port, trans, - ROCKER_OP_FLAG_REMOVE, vid); - - return err; -} - -static int rocker_port_vlans_add(struct rocker_port *rocker_port, - struct switchdev_trans *trans, - const struct switchdev_obj_port_vlan *vlan) -{ - u16 vid; - int err; - - for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) { - err = rocker_port_vlan_add(rocker_port, trans, - vid, vlan->flags); - if (err) - return err; - } - - return 0; -} - -static int rocker_port_fdb_add(struct rocker_port *rocker_port, - struct switchdev_trans *trans, - const struct switchdev_obj_port_fdb *fdb) -{ - __be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, fdb->vid, NULL); - int flags = 0; - - if (!rocker_port_is_bridged(rocker_port)) - return -EINVAL; - - return rocker_port_fdb(rocker_port, trans, fdb->addr, vlan_id, flags); -} - -static int rocker_port_obj_add(struct net_device *dev, - const struct switchdev_obj *obj, - struct switchdev_trans *trans) -{ - struct rocker_port *rocker_port = netdev_priv(dev); - const struct switchdev_obj_ipv4_fib *fib4; - int err = 0; - - switch (obj->id) { - case SWITCHDEV_OBJ_ID_PORT_VLAN: - err = rocker_port_vlans_add(rocker_port, trans, - SWITCHDEV_OBJ_PORT_VLAN(obj)); - break; - case SWITCHDEV_OBJ_ID_IPV4_FIB: - fib4 = SWITCHDEV_OBJ_IPV4_FIB(obj); - err = rocker_port_fib_ipv4(rocker_port, trans, - htonl(fib4->dst), fib4->dst_len, - &fib4->fi, fib4->tb_id, 0); - break; - case SWITCHDEV_OBJ_ID_PORT_FDB: - err = rocker_port_fdb_add(rocker_port, trans, - SWITCHDEV_OBJ_PORT_FDB(obj)); - break; - default: - err = -EOPNOTSUPP; - break; - } - - return err; -} - -static int rocker_port_vlan_del(struct rocker_port *rocker_port, - u16 vid, u16 flags) -{ - int err; - - err = rocker_port_router_mac(rocker_port, NULL, - ROCKER_OP_FLAG_REMOVE, htons(vid)); - if (err) - return err; - - return rocker_port_vlan(rocker_port, NULL, - ROCKER_OP_FLAG_REMOVE, vid); -} - -static int rocker_port_vlans_del(struct rocker_port *rocker_port, - const struct switchdev_obj_port_vlan *vlan) -{ - u16 vid; - int err; - - for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) { - err = rocker_port_vlan_del(rocker_port, vid, vlan->flags); - if (err) - return err; - } - - return 0; -} - -static int rocker_port_fdb_del(struct rocker_port *rocker_port, - struct switchdev_trans *trans, - const struct switchdev_obj_port_fdb *fdb) -{ - __be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, fdb->vid, NULL); - int 
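
The object add path above receives a switchdev_obj_port_vlan describing an inclusive [vid_begin, vid_end] range and simply loops VID by VID, bailing on the first error; deletion walks the same range. A compact sketch of consuming such a range object, where example_one_vlan() is a hypothetical per-VID worker:

#include <net/switchdev.h>

int example_one_vlan(struct net_device *dev, u16 vid, u16 flags);	/* hypothetical */

static int example_vlans_add(struct net_device *dev,
			     const struct switchdev_obj_port_vlan *vlan)
{
	u16 vid;
	int err;

	/* the object carries an inclusive VID range, not a single VID */
	for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
		err = example_one_vlan(dev, vid, vlan->flags);
		if (err)
			return err;
	}
	return 0;
}
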
flags = ROCKER_OP_FLAG_REMOVE; - - if (!rocker_port_is_bridged(rocker_port)) - return -EINVAL; - - return rocker_port_fdb(rocker_port, trans, fdb->addr, vlan_id, flags); -} - -static int rocker_port_obj_del(struct net_device *dev, - const struct switchdev_obj *obj) -{ - struct rocker_port *rocker_port = netdev_priv(dev); - const struct switchdev_obj_ipv4_fib *fib4; - int err = 0; - - switch (obj->id) { - case SWITCHDEV_OBJ_ID_PORT_VLAN: - err = rocker_port_vlans_del(rocker_port, - SWITCHDEV_OBJ_PORT_VLAN(obj)); - break; - case SWITCHDEV_OBJ_ID_IPV4_FIB: - fib4 = SWITCHDEV_OBJ_IPV4_FIB(obj); - err = rocker_port_fib_ipv4(rocker_port, NULL, - htonl(fib4->dst), fib4->dst_len, - &fib4->fi, fib4->tb_id, - ROCKER_OP_FLAG_REMOVE); - break; - case SWITCHDEV_OBJ_ID_PORT_FDB: - err = rocker_port_fdb_del(rocker_port, NULL, - SWITCHDEV_OBJ_PORT_FDB(obj)); - break; - default: - err = -EOPNOTSUPP; - break; - } - - return err; -} - -static int rocker_port_fdb_dump(const struct rocker_port *rocker_port, - struct switchdev_obj_port_fdb *fdb, - switchdev_obj_dump_cb_t *cb) -{ - struct rocker *rocker = rocker_port->rocker; - struct rocker_fdb_tbl_entry *found; - struct hlist_node *tmp; - unsigned long lock_flags; - int bkt; - int err = 0; - - spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags); - hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) { - if (found->key.rocker_port != rocker_port) - continue; - ether_addr_copy(fdb->addr, found->key.addr); - fdb->ndm_state = NUD_REACHABLE; - fdb->vid = rocker_port_vlan_to_vid(rocker_port, - found->key.vlan_id); - err = cb(&fdb->obj); - if (err) - break; - } - spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags); - - return err; -} - -static int rocker_port_vlan_dump(const struct rocker_port *rocker_port, - struct switchdev_obj_port_vlan *vlan, - switchdev_obj_dump_cb_t *cb) -{ - u16 vid; - int err = 0; - - for (vid = 1; vid < VLAN_N_VID; vid++) { - if (!test_bit(vid, rocker_port->vlan_bitmap)) - continue; - vlan->flags = 0; - if (rocker_vlan_id_is_internal(htons(vid))) - vlan->flags |= BRIDGE_VLAN_INFO_PVID; - vlan->vid_begin = vlan->vid_end = vid; - err = cb(&vlan->obj); - if (err) - break; - } - - return err; -} - -static int rocker_port_obj_dump(struct net_device *dev, - struct switchdev_obj *obj, - switchdev_obj_dump_cb_t *cb) -{ - const struct rocker_port *rocker_port = netdev_priv(dev); - int err = 0; - - switch (obj->id) { - case SWITCHDEV_OBJ_ID_PORT_FDB: - err = rocker_port_fdb_dump(rocker_port, - SWITCHDEV_OBJ_PORT_FDB(obj), cb); - break; - case SWITCHDEV_OBJ_ID_PORT_VLAN: - err = rocker_port_vlan_dump(rocker_port, - SWITCHDEV_OBJ_PORT_VLAN(obj), cb); - break; - default: - err = -EOPNOTSUPP; - break; - } - - return err; -} - -static const struct switchdev_ops rocker_port_switchdev_ops = { - .switchdev_port_attr_get = rocker_port_attr_get, - .switchdev_port_attr_set = rocker_port_attr_set, - .switchdev_port_obj_add = rocker_port_obj_add, - .switchdev_port_obj_del = rocker_port_obj_del, - .switchdev_port_obj_dump = rocker_port_obj_dump, -}; - -/******************** - * ethtool interface - ********************/ - -static int rocker_port_get_settings(struct net_device *dev, - struct ethtool_cmd *ecmd) -{ - struct rocker_port *rocker_port = netdev_priv(dev); - - return rocker_cmd_get_port_settings_ethtool(rocker_port, ecmd); -} - -static int rocker_port_set_settings(struct net_device *dev, - struct ethtool_cmd *ecmd) -{ - struct rocker_port *rocker_port = netdev_priv(dev); - - return rocker_cmd_set_port_settings_ethtool(rocker_port, ecmd); -} - 
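
rocker_port_fdb_dump() and rocker_port_vlan_dump() above both follow the switchdev dump contract: the driver overwrites the caller-provided object in place for each entry and invokes the callback, stopping on the first non-zero return. Reduced to its essentials for the VLAN case (the bitmap is passed in here for self-containment):

#include <linux/if_vlan.h>
#include <net/switchdev.h>

static int example_vlan_dump(const unsigned long *vlan_bitmap,
			     struct switchdev_obj_port_vlan *vlan,
			     switchdev_obj_dump_cb_t *cb)
{
	u16 vid;
	int err = 0;

	for (vid = 1; vid < VLAN_N_VID; vid++) {
		if (!test_bit(vid, vlan_bitmap))
			continue;
		/* reuse one object: fill it, then hand it to the callback */
		vlan->flags = 0;
		vlan->vid_begin = vlan->vid_end = vid;
		err = cb(&vlan->obj);
		if (err)
			break;
	}
	return err;
}
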
-static void rocker_port_get_drvinfo(struct net_device *dev, - struct ethtool_drvinfo *drvinfo) -{ - strlcpy(drvinfo->driver, rocker_driver_name, sizeof(drvinfo->driver)); - strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version)); -} - -static struct rocker_port_stats { - char str[ETH_GSTRING_LEN]; - int type; -} rocker_port_stats[] = { - { "rx_packets", ROCKER_TLV_CMD_PORT_STATS_RX_PKTS, }, - { "rx_bytes", ROCKER_TLV_CMD_PORT_STATS_RX_BYTES, }, - { "rx_dropped", ROCKER_TLV_CMD_PORT_STATS_RX_DROPPED, }, - { "rx_errors", ROCKER_TLV_CMD_PORT_STATS_RX_ERRORS, }, - - { "tx_packets", ROCKER_TLV_CMD_PORT_STATS_TX_PKTS, }, - { "tx_bytes", ROCKER_TLV_CMD_PORT_STATS_TX_BYTES, }, - { "tx_dropped", ROCKER_TLV_CMD_PORT_STATS_TX_DROPPED, }, - { "tx_errors", ROCKER_TLV_CMD_PORT_STATS_TX_ERRORS, }, -}; - -#define ROCKER_PORT_STATS_LEN ARRAY_SIZE(rocker_port_stats) - -static void rocker_port_get_strings(struct net_device *netdev, u32 stringset, - u8 *data) -{ - u8 *p = data; - int i; - - switch (stringset) { - case ETH_SS_STATS: - for (i = 0; i < ARRAY_SIZE(rocker_port_stats); i++) { - memcpy(p, rocker_port_stats[i].str, ETH_GSTRING_LEN); - p += ETH_GSTRING_LEN; - } - break; - } -} - -static int -rocker_cmd_get_port_stats_prep(const struct rocker_port *rocker_port, - struct rocker_desc_info *desc_info, - void *priv) -{ - struct rocker_tlv *cmd_stats; - - if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, - ROCKER_TLV_CMD_TYPE_GET_PORT_STATS)) - return -EMSGSIZE; - - cmd_stats = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); - if (!cmd_stats) - return -EMSGSIZE; - - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_STATS_PPORT, - rocker_port->pport)) - return -EMSGSIZE; - - rocker_tlv_nest_end(desc_info, cmd_stats); - - return 0; -} - -static int -rocker_cmd_get_port_stats_ethtool_proc(const struct rocker_port *rocker_port, - const struct rocker_desc_info *desc_info, - void *priv) -{ - const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1]; - const struct rocker_tlv *stats_attrs[ROCKER_TLV_CMD_PORT_STATS_MAX + 1]; - const struct rocker_tlv *pattr; - u32 pport; - u64 *data = priv; - int i; - - rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info); - - if (!attrs[ROCKER_TLV_CMD_INFO]) - return -EIO; - - rocker_tlv_parse_nested(stats_attrs, ROCKER_TLV_CMD_PORT_STATS_MAX, - attrs[ROCKER_TLV_CMD_INFO]); - - if (!stats_attrs[ROCKER_TLV_CMD_PORT_STATS_PPORT]) - return -EIO; - - pport = rocker_tlv_get_u32(stats_attrs[ROCKER_TLV_CMD_PORT_STATS_PPORT]); - if (pport != rocker_port->pport) - return -EIO; - - for (i = 0; i < ARRAY_SIZE(rocker_port_stats); i++) { - pattr = stats_attrs[rocker_port_stats[i].type]; - if (!pattr) - continue; - - data[i] = rocker_tlv_get_u64(pattr); - } - - return 0; -} - -static int rocker_cmd_get_port_stats_ethtool(struct rocker_port *rocker_port, - void *priv) -{ - return rocker_cmd_exec(rocker_port, NULL, 0, - rocker_cmd_get_port_stats_prep, NULL, - rocker_cmd_get_port_stats_ethtool_proc, - priv); -} - -static void rocker_port_get_stats(struct net_device *dev, - struct ethtool_stats *stats, u64 *data) -{ - struct rocker_port *rocker_port = netdev_priv(dev); - - if (rocker_cmd_get_port_stats_ethtool(rocker_port, data) != 0) { - int i; - - for (i = 0; i < ARRAY_SIZE(rocker_port_stats); ++i) - data[i] = 0; - } -} - -static int rocker_port_get_sset_count(struct net_device *netdev, int sset) -{ - switch (sset) { - case ETH_SS_STATS: - return ROCKER_PORT_STATS_LEN; - default: - return -EOPNOTSUPP; - } -} - -static const struct ethtool_ops rocker_port_ethtool_ops 
= { - .get_settings = rocker_port_get_settings, - .set_settings = rocker_port_set_settings, - .get_drvinfo = rocker_port_get_drvinfo, - .get_link = ethtool_op_get_link, - .get_strings = rocker_port_get_strings, - .get_ethtool_stats = rocker_port_get_stats, - .get_sset_count = rocker_port_get_sset_count, -}; - -/***************** - * NAPI interface - *****************/ - -static struct rocker_port *rocker_port_napi_tx_get(struct napi_struct *napi) -{ - return container_of(napi, struct rocker_port, napi_tx); -} - -static int rocker_port_poll_tx(struct napi_struct *napi, int budget) -{ - struct rocker_port *rocker_port = rocker_port_napi_tx_get(napi); - const struct rocker *rocker = rocker_port->rocker; - const struct rocker_desc_info *desc_info; - u32 credits = 0; - int err; - - /* Cleanup tx descriptors */ - while ((desc_info = rocker_desc_tail_get(&rocker_port->tx_ring))) { - struct sk_buff *skb; - - err = rocker_desc_err(desc_info); - if (err && net_ratelimit()) - netdev_err(rocker_port->dev, "tx desc received with err %d\n", - err); - rocker_tx_desc_frags_unmap(rocker_port, desc_info); - - skb = rocker_desc_cookie_ptr_get(desc_info); - if (err == 0) { - rocker_port->dev->stats.tx_packets++; - rocker_port->dev->stats.tx_bytes += skb->len; - } else { - rocker_port->dev->stats.tx_errors++; - } - - dev_kfree_skb_any(skb); - credits++; - } - - if (credits && netif_queue_stopped(rocker_port->dev)) - netif_wake_queue(rocker_port->dev); - - napi_complete(napi); - rocker_dma_ring_credits_set(rocker, &rocker_port->tx_ring, credits); - - return 0; -} - -static int rocker_port_rx_proc(const struct rocker *rocker, - const struct rocker_port *rocker_port, - struct rocker_desc_info *desc_info) -{ - const struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1]; - struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info); - size_t rx_len; - u16 rx_flags = 0; - - if (!skb) - return -ENOENT; - - rocker_tlv_parse_desc(attrs, ROCKER_TLV_RX_MAX, desc_info); - if (!attrs[ROCKER_TLV_RX_FRAG_LEN]) - return -EINVAL; - if (attrs[ROCKER_TLV_RX_FLAGS]) - rx_flags = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FLAGS]); - - rocker_dma_rx_ring_skb_unmap(rocker, attrs); - - rx_len = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FRAG_LEN]); - skb_put(skb, rx_len); - skb->protocol = eth_type_trans(skb, rocker_port->dev); - - if (rx_flags & ROCKER_RX_FLAGS_FWD_OFFLOAD) - skb->offload_fwd_mark = rocker_port->dev->offload_fwd_mark; - - rocker_port->dev->stats.rx_packets++; - rocker_port->dev->stats.rx_bytes += skb->len; - - netif_receive_skb(skb); - - return rocker_dma_rx_ring_skb_alloc(rocker_port, desc_info); -} - -static struct rocker_port *rocker_port_napi_rx_get(struct napi_struct *napi) -{ - return container_of(napi, struct rocker_port, napi_rx); -} - -static int rocker_port_poll_rx(struct napi_struct *napi, int budget) -{ - struct rocker_port *rocker_port = rocker_port_napi_rx_get(napi); - const struct rocker *rocker = rocker_port->rocker; - struct rocker_desc_info *desc_info; - u32 credits = 0; - int err; - - /* Process rx descriptors */ - while (credits < budget && - (desc_info = rocker_desc_tail_get(&rocker_port->rx_ring))) { - err = rocker_desc_err(desc_info); - if (err) { - if (net_ratelimit()) - netdev_err(rocker_port->dev, "rx desc received with err %d\n", - err); - } else { - err = rocker_port_rx_proc(rocker, rocker_port, - desc_info); - if (err && net_ratelimit()) - netdev_err(rocker_port->dev, "rx processing failed with err %d\n", - err); - } - if (err) - rocker_port->dev->stats.rx_errors++; - - 
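
rocker_port_poll_tx() above reclaims completed descriptors, counts them as credits written back to the ring register, and wakes the queue if xmit had stopped it; note it calls napi_complete() unconditionally and returns 0, since TX cleanup does not consume the RX-style budget. A sketch of the credit/wake logic against a hypothetical ring API (skb unmap/free elided):

#include <linux/netdevice.h>

struct example_ring;					/* hypothetical */
struct example_ring *example_tx_ring(struct net_device *dev);
/* consumes and returns one completed descriptor, or NULL when drained */
void *example_ring_tail_get(struct example_ring *ring);
void example_ring_credits_set(struct example_ring *ring, u32 credits);

static int example_poll_tx(struct napi_struct *napi, int budget)
{
	struct net_device *dev = napi->dev;
	struct example_ring *ring = example_tx_ring(dev);
	u32 credits = 0;

	/* TX completion ignores the budget; drain whatever finished */
	while (example_ring_tail_get(ring))
		credits++;

	if (credits && netif_queue_stopped(dev))
		netif_wake_queue(dev);

	napi_complete(napi);
	example_ring_credits_set(ring, credits);
	return 0;
}
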
rocker_desc_gen_clear(desc_info); - rocker_desc_head_set(rocker, &rocker_port->rx_ring, desc_info); - credits++; - } - - if (credits < budget) - napi_complete(napi); - - rocker_dma_ring_credits_set(rocker, &rocker_port->rx_ring, credits); - - return credits; -} - -/***************** - * PCI driver ops - *****************/ - -static void rocker_carrier_init(const struct rocker_port *rocker_port) -{ - const struct rocker *rocker = rocker_port->rocker; - u64 link_status = rocker_read64(rocker, PORT_PHYS_LINK_STATUS); - bool link_up; - - link_up = link_status & (1 << rocker_port->pport); - if (link_up) - netif_carrier_on(rocker_port->dev); - else - netif_carrier_off(rocker_port->dev); -} - -static void rocker_remove_ports(const struct rocker *rocker) -{ - struct rocker_port *rocker_port; - int i; - - for (i = 0; i < rocker->port_count; i++) { - rocker_port = rocker->ports[i]; - if (!rocker_port) - continue; - rocker_port_ig_tbl(rocker_port, NULL, ROCKER_OP_FLAG_REMOVE); - unregister_netdev(rocker_port->dev); - free_netdev(rocker_port->dev); - } - kfree(rocker->ports); -} - -static void rocker_port_dev_addr_init(struct rocker_port *rocker_port) -{ - const struct rocker *rocker = rocker_port->rocker; - const struct pci_dev *pdev = rocker->pdev; - int err; - - err = rocker_cmd_get_port_settings_macaddr(rocker_port, - rocker_port->dev->dev_addr); - if (err) { - dev_warn(&pdev->dev, "failed to get mac address, using random\n"); - eth_hw_addr_random(rocker_port->dev); - } -} - -static int rocker_probe_port(struct rocker *rocker, unsigned int port_number) -{ - const struct pci_dev *pdev = rocker->pdev; - struct rocker_port *rocker_port; - struct net_device *dev; - u16 untagged_vid = 0; - int err; - - dev = alloc_etherdev(sizeof(struct rocker_port)); - if (!dev) - return -ENOMEM; - rocker_port = netdev_priv(dev); - rocker_port->dev = dev; - rocker_port->rocker = rocker; - rocker_port->port_number = port_number; - rocker_port->pport = port_number + 1; - rocker_port->brport_flags = BR_LEARNING | BR_LEARNING_SYNC; - rocker_port->ageing_time = BR_DEFAULT_AGEING_TIME; - - rocker_port_dev_addr_init(rocker_port); - dev->netdev_ops = &rocker_port_netdev_ops; - dev->ethtool_ops = &rocker_port_ethtool_ops; - dev->switchdev_ops = &rocker_port_switchdev_ops; - netif_tx_napi_add(dev, &rocker_port->napi_tx, rocker_port_poll_tx, - NAPI_POLL_WEIGHT); - netif_napi_add(dev, &rocker_port->napi_rx, rocker_port_poll_rx, - NAPI_POLL_WEIGHT); - rocker_carrier_init(rocker_port); - - dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_SG; - - err = register_netdev(dev); - if (err) { - dev_err(&pdev->dev, "register_netdev failed\n"); - goto err_register_netdev; - } - rocker->ports[port_number] = rocker_port; - - switchdev_port_fwd_mark_set(rocker_port->dev, NULL, false); - - rocker_port_set_learning(rocker_port, NULL); - - err = rocker_port_ig_tbl(rocker_port, NULL, 0); - if (err) { - netdev_err(rocker_port->dev, "install ig port table failed\n"); - goto err_port_ig_tbl; - } - - rocker_port->internal_vlan_id = - rocker_port_internal_vlan_id_get(rocker_port, dev->ifindex); - - err = rocker_port_vlan_add(rocker_port, NULL, untagged_vid, 0); - if (err) { - netdev_err(rocker_port->dev, "install untagged VLAN failed\n"); - goto err_untagged_vlan; - } - - return 0; - -err_untagged_vlan: - rocker_port_ig_tbl(rocker_port, NULL, ROCKER_OP_FLAG_REMOVE); -err_port_ig_tbl: - rocker->ports[port_number] = NULL; - unregister_netdev(dev); -err_register_netdev: - free_netdev(dev); - return err; -} - -static int rocker_probe_ports(struct rocker 
*rocker) -{ - int i; - size_t alloc_size; - int err; - - alloc_size = sizeof(struct rocker_port *) * rocker->port_count; - rocker->ports = kzalloc(alloc_size, GFP_KERNEL); - if (!rocker->ports) - return -ENOMEM; - for (i = 0; i < rocker->port_count; i++) { - err = rocker_probe_port(rocker, i); - if (err) - goto remove_ports; - } - return 0; - -remove_ports: - rocker_remove_ports(rocker); - return err; -} - -static int rocker_msix_init(struct rocker *rocker) -{ - struct pci_dev *pdev = rocker->pdev; - int msix_entries; - int i; - int err; - - msix_entries = pci_msix_vec_count(pdev); - if (msix_entries < 0) - return msix_entries; - - if (msix_entries != ROCKER_MSIX_VEC_COUNT(rocker->port_count)) - return -EINVAL; - - rocker->msix_entries = kmalloc_array(msix_entries, - sizeof(struct msix_entry), - GFP_KERNEL); - if (!rocker->msix_entries) - return -ENOMEM; - - for (i = 0; i < msix_entries; i++) - rocker->msix_entries[i].entry = i; - - err = pci_enable_msix_exact(pdev, rocker->msix_entries, msix_entries); - if (err < 0) - goto err_enable_msix; - - return 0; - -err_enable_msix: - kfree(rocker->msix_entries); - return err; -} - -static void rocker_msix_fini(const struct rocker *rocker) -{ - pci_disable_msix(rocker->pdev); - kfree(rocker->msix_entries); -} - -static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id) -{ - struct rocker *rocker; - int err; - - rocker = kzalloc(sizeof(*rocker), GFP_KERNEL); - if (!rocker) - return -ENOMEM; - - err = pci_enable_device(pdev); - if (err) { - dev_err(&pdev->dev, "pci_enable_device failed\n"); - goto err_pci_enable_device; - } - - err = pci_request_regions(pdev, rocker_driver_name); - if (err) { - dev_err(&pdev->dev, "pci_request_regions failed\n"); - goto err_pci_request_regions; - } - - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); - if (!err) { - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); - if (err) { - dev_err(&pdev->dev, "pci_set_consistent_dma_mask failed\n"); - goto err_pci_set_dma_mask; - } - } else { - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); - if (err) { - dev_err(&pdev->dev, "pci_set_dma_mask failed\n"); - goto err_pci_set_dma_mask; - } - } - - if (pci_resource_len(pdev, 0) < ROCKER_PCI_BAR0_SIZE) { - dev_err(&pdev->dev, "invalid PCI region size\n"); - err = -EINVAL; - goto err_pci_resource_len_check; - } - - rocker->hw_addr = ioremap(pci_resource_start(pdev, 0), - pci_resource_len(pdev, 0)); - if (!rocker->hw_addr) { - dev_err(&pdev->dev, "ioremap failed\n"); - err = -EIO; - goto err_ioremap; - } - pci_set_master(pdev); - - rocker->pdev = pdev; - pci_set_drvdata(pdev, rocker); - - rocker->port_count = rocker_read32(rocker, PORT_PHYS_COUNT); - - err = rocker_msix_init(rocker); - if (err) { - dev_err(&pdev->dev, "MSI-X init failed\n"); - goto err_msix_init; - } - - err = rocker_basic_hw_test(rocker); - if (err) { - dev_err(&pdev->dev, "basic hw test failed\n"); - goto err_basic_hw_test; - } - - rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET); - - err = rocker_dma_rings_init(rocker); - if (err) - goto err_dma_rings_init; - - err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), - rocker_cmd_irq_handler, 0, - rocker_driver_name, rocker); - if (err) { - dev_err(&pdev->dev, "cannot assign cmd irq\n"); - goto err_request_cmd_irq; - } - - err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), - rocker_event_irq_handler, 0, - rocker_driver_name, rocker); - if (err) { - dev_err(&pdev->dev, "cannot assign event irq\n"); - goto err_request_event_irq; - } - - rocker->hw.id = 
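
rocker_msix_init() above sizes the vector table from pci_msix_vec_count(), demands the exact cmd/event plus per-port tx/rx layout, and uses pci_enable_msix_exact() so a partial grant counts as failure. The core of that sequence as a sketch, returning ERR_PTR rather than the driver's int convention:

#include <linux/err.h>
#include <linux/pci.h>
#include <linux/slab.h>

static struct msix_entry *example_msix_init(struct pci_dev *pdev,
					    int want_vecs)
{
	struct msix_entry *entries;
	int avail = pci_msix_vec_count(pdev);
	int i;

	/* all-or-nothing: fewer vectors than the layout needs is an error */
	if (avail < want_vecs)
		return ERR_PTR(-EINVAL);

	entries = kmalloc_array(want_vecs, sizeof(*entries), GFP_KERNEL);
	if (!entries)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < want_vecs; i++)
		entries[i].entry = i;

	if (pci_enable_msix_exact(pdev, entries, want_vecs) < 0) {
		kfree(entries);
		return ERR_PTR(-EIO);
	}
	return entries;
}
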
rocker_read64(rocker, SWITCH_ID); - - err = rocker_init_tbls(rocker); - if (err) { - dev_err(&pdev->dev, "cannot init rocker tables\n"); - goto err_init_tbls; - } - - setup_timer(&rocker->fdb_cleanup_timer, rocker_fdb_cleanup, - (unsigned long) rocker); - mod_timer(&rocker->fdb_cleanup_timer, jiffies); - - err = rocker_probe_ports(rocker); - if (err) { - dev_err(&pdev->dev, "failed to probe ports\n"); - goto err_probe_ports; - } - - dev_info(&pdev->dev, "Rocker switch with id %*phN\n", - (int)sizeof(rocker->hw.id), &rocker->hw.id); - - return 0; - -err_probe_ports: - del_timer_sync(&rocker->fdb_cleanup_timer); - rocker_free_tbls(rocker); -err_init_tbls: - free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker); -err_request_event_irq: - free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), rocker); -err_request_cmd_irq: - rocker_dma_rings_fini(rocker); -err_dma_rings_init: -err_basic_hw_test: - rocker_msix_fini(rocker); -err_msix_init: - iounmap(rocker->hw_addr); -err_ioremap: -err_pci_resource_len_check: -err_pci_set_dma_mask: - pci_release_regions(pdev); -err_pci_request_regions: - pci_disable_device(pdev); -err_pci_enable_device: - kfree(rocker); - return err; -} - -static void rocker_remove(struct pci_dev *pdev) -{ - struct rocker *rocker = pci_get_drvdata(pdev); - - del_timer_sync(&rocker->fdb_cleanup_timer); - rocker_free_tbls(rocker); - rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET); - rocker_remove_ports(rocker); - free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker); - free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), rocker); - rocker_dma_rings_fini(rocker); - rocker_msix_fini(rocker); - iounmap(rocker->hw_addr); - pci_release_regions(rocker->pdev); - pci_disable_device(rocker->pdev); - kfree(rocker); -} - -static struct pci_driver rocker_pci_driver = { - .name = rocker_driver_name, - .id_table = rocker_pci_id_table, - .probe = rocker_probe, - .remove = rocker_remove, -}; - -/************************************ - * Net device notifier event handler - ************************************/ - -static bool rocker_port_dev_check(const struct net_device *dev) -{ - return dev->netdev_ops == &rocker_port_netdev_ops; -} - -static int rocker_port_bridge_join(struct rocker_port *rocker_port, - struct net_device *bridge) -{ - u16 untagged_vid = 0; - int err; - - /* Port is joining bridge, so the internal VLAN for the - * port is going to change to the bridge internal VLAN. - * Let's remove untagged VLAN (vid=0) from port and - * re-add once internal VLAN has changed. 
- */ - - err = rocker_port_vlan_del(rocker_port, untagged_vid, 0); - if (err) - return err; - - rocker_port_internal_vlan_id_put(rocker_port, - rocker_port->dev->ifindex); - rocker_port->internal_vlan_id = - rocker_port_internal_vlan_id_get(rocker_port, bridge->ifindex); - - rocker_port->bridge_dev = bridge; - switchdev_port_fwd_mark_set(rocker_port->dev, bridge, true); - - return rocker_port_vlan_add(rocker_port, NULL, untagged_vid, 0); -} - -static int rocker_port_bridge_leave(struct rocker_port *rocker_port) -{ - u16 untagged_vid = 0; - int err; - - err = rocker_port_vlan_del(rocker_port, untagged_vid, 0); - if (err) - return err; - - rocker_port_internal_vlan_id_put(rocker_port, - rocker_port->bridge_dev->ifindex); - rocker_port->internal_vlan_id = - rocker_port_internal_vlan_id_get(rocker_port, - rocker_port->dev->ifindex); - - switchdev_port_fwd_mark_set(rocker_port->dev, rocker_port->bridge_dev, - false); - rocker_port->bridge_dev = NULL; - - err = rocker_port_vlan_add(rocker_port, NULL, untagged_vid, 0); - if (err) - return err; - - if (rocker_port->dev->flags & IFF_UP) - err = rocker_port_fwd_enable(rocker_port, NULL, 0); - - return err; -} - - -static int rocker_port_ovs_changed(struct rocker_port *rocker_port, - struct net_device *master) -{ - int err; - - rocker_port->bridge_dev = master; - - err = rocker_port_fwd_disable(rocker_port, NULL, 0); - if (err) - return err; - err = rocker_port_fwd_enable(rocker_port, NULL, 0); - - return err; -} - -static int rocker_port_master_linked(struct rocker_port *rocker_port, - struct net_device *master) -{ - int err = 0; - - if (netif_is_bridge_master(master)) - err = rocker_port_bridge_join(rocker_port, master); - else if (netif_is_ovs_master(master)) - err = rocker_port_ovs_changed(rocker_port, master); - return err; -} - -static int rocker_port_master_unlinked(struct rocker_port *rocker_port) -{ - int err = 0; - - if (rocker_port_is_bridged(rocker_port)) - err = rocker_port_bridge_leave(rocker_port); - else if (rocker_port_is_ovsed(rocker_port)) - err = rocker_port_ovs_changed(rocker_port, NULL); - return err; -} - -static int rocker_netdevice_event(struct notifier_block *unused, - unsigned long event, void *ptr) -{ - struct net_device *dev = netdev_notifier_info_to_dev(ptr); - struct netdev_notifier_changeupper_info *info; - struct rocker_port *rocker_port; - int err; - - if (!rocker_port_dev_check(dev)) - return NOTIFY_DONE; - - switch (event) { - case NETDEV_CHANGEUPPER: - info = ptr; - if (!info->master) - goto out; - rocker_port = netdev_priv(dev); - if (info->linking) { - err = rocker_port_master_linked(rocker_port, - info->upper_dev); - if (err) - netdev_warn(dev, "failed to reflect master linked (err %d)\n", - err); - } else { - err = rocker_port_master_unlinked(rocker_port); - if (err) - netdev_warn(dev, "failed to reflect master unlinked (err %d)\n", - err); - } - break; - } -out: - return NOTIFY_DONE; -} - -static struct notifier_block rocker_netdevice_nb __read_mostly = { - .notifier_call = rocker_netdevice_event, -}; - -/************************************ - * Net event notifier event handler - ************************************/ - -static int rocker_neigh_update(struct net_device *dev, struct neighbour *n) -{ - struct rocker_port *rocker_port = netdev_priv(dev); - int flags = (n->nud_state & NUD_VALID ? 
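
rocker_netdevice_event() above is the standard NETDEV_CHANGEUPPER recipe: filter to the driver's own netdevs first (rocker_port_dev_check() compares netdev_ops pointers), then branch on info->linking to join or leave the master; errors are only logged because a notifier cannot veto the operation at this point. The skeleton, with a hypothetical example_dev_check():

#include <linux/netdevice.h>

static bool example_dev_check(const struct net_device *dev);	/* ops-pointer test */

static int example_netdevice_event(struct notifier_block *nb,
				   unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct netdev_notifier_changeupper_info *info = ptr;

	if (!example_dev_check(dev))
		return NOTIFY_DONE;

	if (event == NETDEV_CHANGEUPPER && info->master) {
		if (info->linking)
			netdev_info(dev, "linked under %s\n",
				    info->upper_dev->name);
		else
			netdev_info(dev, "unlinked from master\n");
	}
	return NOTIFY_DONE;
}

static struct notifier_block example_netdevice_nb __read_mostly = {
	.notifier_call = example_netdevice_event,
};
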
0 : ROCKER_OP_FLAG_REMOVE) | - ROCKER_OP_FLAG_NOWAIT; - __be32 ip_addr = *(__be32 *)n->primary_key; - - return rocker_port_ipv4_neigh(rocker_port, NULL, flags, ip_addr, n->ha); -} - -static int rocker_netevent_event(struct notifier_block *unused, - unsigned long event, void *ptr) -{ - struct net_device *dev; - struct neighbour *n = ptr; - int err; - - switch (event) { - case NETEVENT_NEIGH_UPDATE: - if (n->tbl != &arp_tbl) - return NOTIFY_DONE; - dev = n->dev; - if (!rocker_port_dev_check(dev)) - return NOTIFY_DONE; - err = rocker_neigh_update(dev, n); - if (err) - netdev_warn(dev, - "failed to handle neigh update (err %d)\n", - err); - break; - } - - return NOTIFY_DONE; -} - -static struct notifier_block rocker_netevent_nb __read_mostly = { - .notifier_call = rocker_netevent_event, -}; - -/*********************** - * Module init and exit - ***********************/ - -static int __init rocker_module_init(void) -{ - int err; - - register_netdevice_notifier(&rocker_netdevice_nb); - register_netevent_notifier(&rocker_netevent_nb); - err = pci_register_driver(&rocker_pci_driver); - if (err) - goto err_pci_register_driver; - return 0; - -err_pci_register_driver: - unregister_netevent_notifier(&rocker_netevent_nb); - unregister_netdevice_notifier(&rocker_netdevice_nb); - return err; -} - -static void __exit rocker_module_exit(void) -{ - unregister_netevent_notifier(&rocker_netevent_nb); - unregister_netdevice_notifier(&rocker_netdevice_nb); - pci_unregister_driver(&rocker_pci_driver); -} - -module_init(rocker_module_init); -module_exit(rocker_module_exit); - -MODULE_LICENSE("GPL v2"); -MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>"); -MODULE_AUTHOR("Scott Feldman <sfeldma@gmail.com>"); -MODULE_DESCRIPTION("Rocker switch device driver"); -MODULE_DEVICE_TABLE(pci, rocker_pci_id_table); diff --git a/drivers/net/ethernet/rocker/rocker.h b/drivers/net/ethernet/rocker/rocker.h index 12490b2f6504..1ab995f7146b 100644 --- a/drivers/net/ethernet/rocker/rocker.h +++ b/drivers/net/ethernet/rocker/rocker.h @@ -1,6 +1,6 @@ /* * drivers/net/ethernet/rocker/rocker.h - Rocker switch device driver - * Copyright (c) 2014 Jiri Pirko <jiri@resnulli.us> + * Copyright (c) 2014-2016 Jiri Pirko <jiri@mellanox.com> * Copyright (c) 2014 Scott Feldman <sfeldma@gmail.com> * * This program is free software; you can redistribute it and/or modify @@ -12,456 +12,137 @@ #ifndef _ROCKER_H #define _ROCKER_H +#include <linux/kernel.h> #include <linux/types.h> - -/* Return codes */ -enum { - ROCKER_OK = 0, - ROCKER_ENOENT = 2, - ROCKER_ENXIO = 6, - ROCKER_ENOMEM = 12, - ROCKER_EEXIST = 17, - ROCKER_EINVAL = 22, - ROCKER_EMSGSIZE = 90, - ROCKER_ENOTSUP = 95, - ROCKER_ENOBUFS = 105, -}; - -#define ROCKER_FP_PORTS_MAX 62 - -#define PCI_VENDOR_ID_REDHAT 0x1b36 -#define PCI_DEVICE_ID_REDHAT_ROCKER 0x0006 - -#define ROCKER_PCI_BAR0_SIZE 0x2000 - -/* MSI-X vectors */ -enum { - ROCKER_MSIX_VEC_CMD, - ROCKER_MSIX_VEC_EVENT, - ROCKER_MSIX_VEC_TEST, - ROCKER_MSIX_VEC_RESERVED0, - __ROCKER_MSIX_VEC_TX, - __ROCKER_MSIX_VEC_RX, -#define ROCKER_MSIX_VEC_TX(port) \ - (__ROCKER_MSIX_VEC_TX + ((port) * 2)) -#define ROCKER_MSIX_VEC_RX(port) \ - (__ROCKER_MSIX_VEC_RX + ((port) * 2)) -#define ROCKER_MSIX_VEC_COUNT(portcnt) \ - (ROCKER_MSIX_VEC_RX((portcnt - 1)) + 1) -}; - -/* Rocker bogus registers */ -#define ROCKER_BOGUS_REG0 0x0000 -#define ROCKER_BOGUS_REG1 0x0004 -#define ROCKER_BOGUS_REG2 0x0008 -#define ROCKER_BOGUS_REG3 0x000c - -/* Rocker test registers */ -#define ROCKER_TEST_REG 0x0010 -#define ROCKER_TEST_REG64 0x0018 /* 8-byte */ -#define 
ROCKER_TEST_IRQ 0x0020 -#define ROCKER_TEST_DMA_ADDR 0x0028 /* 8-byte */ -#define ROCKER_TEST_DMA_SIZE 0x0030 -#define ROCKER_TEST_DMA_CTRL 0x0034 - -/* Rocker test register ctrl */ -#define ROCKER_TEST_DMA_CTRL_CLEAR BIT(0) -#define ROCKER_TEST_DMA_CTRL_FILL BIT(1) -#define ROCKER_TEST_DMA_CTRL_INVERT BIT(2) - -/* Rocker DMA ring register offsets */ -#define ROCKER_DMA_DESC_ADDR(x) (0x1000 + (x) * 32) /* 8-byte */ -#define ROCKER_DMA_DESC_SIZE(x) (0x1008 + (x) * 32) -#define ROCKER_DMA_DESC_HEAD(x) (0x100c + (x) * 32) -#define ROCKER_DMA_DESC_TAIL(x) (0x1010 + (x) * 32) -#define ROCKER_DMA_DESC_CTRL(x) (0x1014 + (x) * 32) -#define ROCKER_DMA_DESC_CREDITS(x) (0x1018 + (x) * 32) -#define ROCKER_DMA_DESC_RES1(x) (0x101c + (x) * 32) - -/* Rocker dma ctrl register bits */ -#define ROCKER_DMA_DESC_CTRL_RESET BIT(0) - -/* Rocker DMA ring types */ -enum rocker_dma_type { - ROCKER_DMA_CMD, - ROCKER_DMA_EVENT, - __ROCKER_DMA_TX, - __ROCKER_DMA_RX, -#define ROCKER_DMA_TX(port) (__ROCKER_DMA_TX + (port) * 2) -#define ROCKER_DMA_RX(port) (__ROCKER_DMA_RX + (port) * 2) -}; - -/* Rocker DMA ring size limits and default sizes */ -#define ROCKER_DMA_SIZE_MIN 2ul -#define ROCKER_DMA_SIZE_MAX 65536ul -#define ROCKER_DMA_CMD_DEFAULT_SIZE 32ul -#define ROCKER_DMA_EVENT_DEFAULT_SIZE 32ul -#define ROCKER_DMA_TX_DEFAULT_SIZE 64ul -#define ROCKER_DMA_TX_DESC_SIZE 256 -#define ROCKER_DMA_RX_DEFAULT_SIZE 64ul -#define ROCKER_DMA_RX_DESC_SIZE 256 - -/* Rocker DMA descriptor struct */ -struct rocker_desc { - u64 buf_addr; - u64 cookie; - u16 buf_size; - u16 tlv_size; - u16 resv[5]; - u16 comp_err; -}; - -#define ROCKER_DMA_DESC_COMP_ERR_GEN BIT(15) - -/* Rocker DMA TLV struct */ -struct rocker_tlv { - u32 type; - u16 len; -}; - -/* TLVs */ -enum { - ROCKER_TLV_CMD_UNSPEC, - ROCKER_TLV_CMD_TYPE, /* u16 */ - ROCKER_TLV_CMD_INFO, /* nest */ - - __ROCKER_TLV_CMD_MAX, - ROCKER_TLV_CMD_MAX = __ROCKER_TLV_CMD_MAX - 1, -}; - -enum { - ROCKER_TLV_CMD_TYPE_UNSPEC, - ROCKER_TLV_CMD_TYPE_GET_PORT_SETTINGS, - ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS, - ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD, - ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD, - ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL, - ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_GET_STATS, - ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD, - ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD, - ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL, - ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_GET_STATS, - - ROCKER_TLV_CMD_TYPE_CLEAR_PORT_STATS, - ROCKER_TLV_CMD_TYPE_GET_PORT_STATS, - - __ROCKER_TLV_CMD_TYPE_MAX, - ROCKER_TLV_CMD_TYPE_MAX = __ROCKER_TLV_CMD_TYPE_MAX - 1, -}; - -enum { - ROCKER_TLV_CMD_PORT_SETTINGS_UNSPEC, - ROCKER_TLV_CMD_PORT_SETTINGS_PPORT, /* u32 */ - ROCKER_TLV_CMD_PORT_SETTINGS_SPEED, /* u32 */ - ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX, /* u8 */ - ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG, /* u8 */ - ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR, /* binary */ - ROCKER_TLV_CMD_PORT_SETTINGS_MODE, /* u8 */ - ROCKER_TLV_CMD_PORT_SETTINGS_LEARNING, /* u8 */ - ROCKER_TLV_CMD_PORT_SETTINGS_PHYS_NAME, /* binary */ - ROCKER_TLV_CMD_PORT_SETTINGS_MTU, /* u16 */ - - __ROCKER_TLV_CMD_PORT_SETTINGS_MAX, - ROCKER_TLV_CMD_PORT_SETTINGS_MAX = - __ROCKER_TLV_CMD_PORT_SETTINGS_MAX - 1, -}; - -enum { - ROCKER_TLV_CMD_PORT_STATS_UNSPEC, - ROCKER_TLV_CMD_PORT_STATS_PPORT, /* u32 */ - - ROCKER_TLV_CMD_PORT_STATS_RX_PKTS, /* u64 */ - ROCKER_TLV_CMD_PORT_STATS_RX_BYTES, /* u64 */ - ROCKER_TLV_CMD_PORT_STATS_RX_DROPPED, /* u64 */ - ROCKER_TLV_CMD_PORT_STATS_RX_ERRORS, /* u64 */ - - ROCKER_TLV_CMD_PORT_STATS_TX_PKTS, /* u64 */ - ROCKER_TLV_CMD_PORT_STATS_TX_BYTES, /* u64 */ - 
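
These deleted attribute enums all end with the netlink-style sentinel pair: a private __..._MAX member followed by a public ..._MAX defined one less, so parse buffers can be declared as attrs[MAX + 1] and stay in sync as attributes are appended over time. The convention in miniature, with a made-up attribute set:

enum {
	EXAMPLE_TLV_UNSPEC,	/* 0 stays reserved/unused */
	EXAMPLE_TLV_TYPE,	/* u16 */
	EXAMPLE_TLV_INFO,	/* nest */

	__EXAMPLE_TLV_MAX,
	EXAMPLE_TLV_MAX = __EXAMPLE_TLV_MAX - 1,
};

/* one slot per attribute type, indexed directly by type, as the
 * rocker_tlv_parse_*() callers above do
 */
const struct rocker_tlv *attrs[EXAMPLE_TLV_MAX + 1];

New attributes are appended just before __EXAMPLE_TLV_MAX, which keeps existing type values, and therefore the wire format, stable.
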
ROCKER_TLV_CMD_PORT_STATS_TX_DROPPED, /* u64 */ - ROCKER_TLV_CMD_PORT_STATS_TX_ERRORS, /* u64 */ - - __ROCKER_TLV_CMD_PORT_STATS_MAX, - ROCKER_TLV_CMD_PORT_STATS_MAX = __ROCKER_TLV_CMD_PORT_STATS_MAX - 1, -}; - -enum rocker_port_mode { - ROCKER_PORT_MODE_OF_DPA, -}; - -enum { - ROCKER_TLV_EVENT_UNSPEC, - ROCKER_TLV_EVENT_TYPE, /* u16 */ - ROCKER_TLV_EVENT_INFO, /* nest */ - - __ROCKER_TLV_EVENT_MAX, - ROCKER_TLV_EVENT_MAX = __ROCKER_TLV_EVENT_MAX - 1, -}; - -enum { - ROCKER_TLV_EVENT_TYPE_UNSPEC, - ROCKER_TLV_EVENT_TYPE_LINK_CHANGED, - ROCKER_TLV_EVENT_TYPE_MAC_VLAN_SEEN, - - __ROCKER_TLV_EVENT_TYPE_MAX, - ROCKER_TLV_EVENT_TYPE_MAX = __ROCKER_TLV_EVENT_TYPE_MAX - 1, -}; - -enum { - ROCKER_TLV_EVENT_LINK_CHANGED_UNSPEC, - ROCKER_TLV_EVENT_LINK_CHANGED_PPORT, /* u32 */ - ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP, /* u8 */ - - __ROCKER_TLV_EVENT_LINK_CHANGED_MAX, - ROCKER_TLV_EVENT_LINK_CHANGED_MAX = - __ROCKER_TLV_EVENT_LINK_CHANGED_MAX - 1, -}; - -enum { - ROCKER_TLV_EVENT_MAC_VLAN_UNSPEC, - ROCKER_TLV_EVENT_MAC_VLAN_PPORT, /* u32 */ - ROCKER_TLV_EVENT_MAC_VLAN_MAC, /* binary */ - ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID, /* __be16 */ - - __ROCKER_TLV_EVENT_MAC_VLAN_MAX, - ROCKER_TLV_EVENT_MAC_VLAN_MAX = __ROCKER_TLV_EVENT_MAC_VLAN_MAX - 1, -}; - -enum { - ROCKER_TLV_RX_UNSPEC, - ROCKER_TLV_RX_FLAGS, /* u16, see ROCKER_RX_FLAGS_ */ - ROCKER_TLV_RX_CSUM, /* u16 */ - ROCKER_TLV_RX_FRAG_ADDR, /* u64 */ - ROCKER_TLV_RX_FRAG_MAX_LEN, /* u16 */ - ROCKER_TLV_RX_FRAG_LEN, /* u16 */ - - __ROCKER_TLV_RX_MAX, - ROCKER_TLV_RX_MAX = __ROCKER_TLV_RX_MAX - 1, -}; - -#define ROCKER_RX_FLAGS_IPV4 BIT(0) -#define ROCKER_RX_FLAGS_IPV6 BIT(1) -#define ROCKER_RX_FLAGS_CSUM_CALC BIT(2) -#define ROCKER_RX_FLAGS_IPV4_CSUM_GOOD BIT(3) -#define ROCKER_RX_FLAGS_IP_FRAG BIT(4) -#define ROCKER_RX_FLAGS_TCP BIT(5) -#define ROCKER_RX_FLAGS_UDP BIT(6) -#define ROCKER_RX_FLAGS_TCP_UDP_CSUM_GOOD BIT(7) -#define ROCKER_RX_FLAGS_FWD_OFFLOAD BIT(8) - -enum { - ROCKER_TLV_TX_UNSPEC, - ROCKER_TLV_TX_OFFLOAD, /* u8, see ROCKER_TX_OFFLOAD_ */ - ROCKER_TLV_TX_L3_CSUM_OFF, /* u16 */ - ROCKER_TLV_TX_TSO_MSS, /* u16 */ - ROCKER_TLV_TX_TSO_HDR_LEN, /* u16 */ - ROCKER_TLV_TX_FRAGS, /* array */ - - __ROCKER_TLV_TX_MAX, - ROCKER_TLV_TX_MAX = __ROCKER_TLV_TX_MAX - 1, -}; - -#define ROCKER_TX_OFFLOAD_NONE 0 -#define ROCKER_TX_OFFLOAD_IP_CSUM 1 -#define ROCKER_TX_OFFLOAD_TCP_UDP_CSUM 2 -#define ROCKER_TX_OFFLOAD_L3_CSUM 3 -#define ROCKER_TX_OFFLOAD_TSO 4 - -#define ROCKER_TX_FRAGS_MAX 16 - -enum { - ROCKER_TLV_TX_FRAG_UNSPEC, - ROCKER_TLV_TX_FRAG, /* nest */ - - __ROCKER_TLV_TX_FRAG_MAX, - ROCKER_TLV_TX_FRAG_MAX = __ROCKER_TLV_TX_FRAG_MAX - 1, -}; - -enum { - ROCKER_TLV_TX_FRAG_ATTR_UNSPEC, - ROCKER_TLV_TX_FRAG_ATTR_ADDR, /* u64 */ - ROCKER_TLV_TX_FRAG_ATTR_LEN, /* u16 */ - - __ROCKER_TLV_TX_FRAG_ATTR_MAX, - ROCKER_TLV_TX_FRAG_ATTR_MAX = __ROCKER_TLV_TX_FRAG_ATTR_MAX - 1, -}; - -/* cmd info nested for OF-DPA msgs */ -enum { - ROCKER_TLV_OF_DPA_UNSPEC, - ROCKER_TLV_OF_DPA_TABLE_ID, /* u16 */ - ROCKER_TLV_OF_DPA_PRIORITY, /* u32 */ - ROCKER_TLV_OF_DPA_HARDTIME, /* u32 */ - ROCKER_TLV_OF_DPA_IDLETIME, /* u32 */ - ROCKER_TLV_OF_DPA_COOKIE, /* u64 */ - ROCKER_TLV_OF_DPA_IN_PPORT, /* u32 */ - ROCKER_TLV_OF_DPA_IN_PPORT_MASK, /* u32 */ - ROCKER_TLV_OF_DPA_OUT_PPORT, /* u32 */ - ROCKER_TLV_OF_DPA_GOTO_TABLE_ID, /* u16 */ - ROCKER_TLV_OF_DPA_GROUP_ID, /* u32 */ - ROCKER_TLV_OF_DPA_GROUP_ID_LOWER, /* u32 */ - ROCKER_TLV_OF_DPA_GROUP_COUNT, /* u16 */ - ROCKER_TLV_OF_DPA_GROUP_IDS, /* u32 array */ - ROCKER_TLV_OF_DPA_VLAN_ID, /* __be16 */ - 
ROCKER_TLV_OF_DPA_VLAN_ID_MASK, /* __be16 */ - ROCKER_TLV_OF_DPA_VLAN_PCP, /* __be16 */ - ROCKER_TLV_OF_DPA_VLAN_PCP_MASK, /* __be16 */ - ROCKER_TLV_OF_DPA_VLAN_PCP_ACTION, /* u8 */ - ROCKER_TLV_OF_DPA_NEW_VLAN_ID, /* __be16 */ - ROCKER_TLV_OF_DPA_NEW_VLAN_PCP, /* u8 */ - ROCKER_TLV_OF_DPA_TUNNEL_ID, /* u32 */ - ROCKER_TLV_OF_DPA_TUNNEL_LPORT, /* u32 */ - ROCKER_TLV_OF_DPA_ETHERTYPE, /* __be16 */ - ROCKER_TLV_OF_DPA_DST_MAC, /* binary */ - ROCKER_TLV_OF_DPA_DST_MAC_MASK, /* binary */ - ROCKER_TLV_OF_DPA_SRC_MAC, /* binary */ - ROCKER_TLV_OF_DPA_SRC_MAC_MASK, /* binary */ - ROCKER_TLV_OF_DPA_IP_PROTO, /* u8 */ - ROCKER_TLV_OF_DPA_IP_PROTO_MASK, /* u8 */ - ROCKER_TLV_OF_DPA_IP_DSCP, /* u8 */ - ROCKER_TLV_OF_DPA_IP_DSCP_MASK, /* u8 */ - ROCKER_TLV_OF_DPA_IP_DSCP_ACTION, /* u8 */ - ROCKER_TLV_OF_DPA_NEW_IP_DSCP, /* u8 */ - ROCKER_TLV_OF_DPA_IP_ECN, /* u8 */ - ROCKER_TLV_OF_DPA_IP_ECN_MASK, /* u8 */ - ROCKER_TLV_OF_DPA_DST_IP, /* __be32 */ - ROCKER_TLV_OF_DPA_DST_IP_MASK, /* __be32 */ - ROCKER_TLV_OF_DPA_SRC_IP, /* __be32 */ - ROCKER_TLV_OF_DPA_SRC_IP_MASK, /* __be32 */ - ROCKER_TLV_OF_DPA_DST_IPV6, /* binary */ - ROCKER_TLV_OF_DPA_DST_IPV6_MASK, /* binary */ - ROCKER_TLV_OF_DPA_SRC_IPV6, /* binary */ - ROCKER_TLV_OF_DPA_SRC_IPV6_MASK, /* binary */ - ROCKER_TLV_OF_DPA_SRC_ARP_IP, /* __be32 */ - ROCKER_TLV_OF_DPA_SRC_ARP_IP_MASK, /* __be32 */ - ROCKER_TLV_OF_DPA_L4_DST_PORT, /* __be16 */ - ROCKER_TLV_OF_DPA_L4_DST_PORT_MASK, /* __be16 */ - ROCKER_TLV_OF_DPA_L4_SRC_PORT, /* __be16 */ - ROCKER_TLV_OF_DPA_L4_SRC_PORT_MASK, /* __be16 */ - ROCKER_TLV_OF_DPA_ICMP_TYPE, /* u8 */ - ROCKER_TLV_OF_DPA_ICMP_TYPE_MASK, /* u8 */ - ROCKER_TLV_OF_DPA_ICMP_CODE, /* u8 */ - ROCKER_TLV_OF_DPA_ICMP_CODE_MASK, /* u8 */ - ROCKER_TLV_OF_DPA_IPV6_LABEL, /* __be32 */ - ROCKER_TLV_OF_DPA_IPV6_LABEL_MASK, /* __be32 */ - ROCKER_TLV_OF_DPA_QUEUE_ID_ACTION, /* u8 */ - ROCKER_TLV_OF_DPA_NEW_QUEUE_ID, /* u8 */ - ROCKER_TLV_OF_DPA_CLEAR_ACTIONS, /* u32 */ - ROCKER_TLV_OF_DPA_POP_VLAN, /* u8 */ - ROCKER_TLV_OF_DPA_TTL_CHECK, /* u8 */ - ROCKER_TLV_OF_DPA_COPY_CPU_ACTION, /* u8 */ - - __ROCKER_TLV_OF_DPA_MAX, - ROCKER_TLV_OF_DPA_MAX = __ROCKER_TLV_OF_DPA_MAX - 1, -}; - -/* OF-DPA table IDs */ - -enum rocker_of_dpa_table_id { - ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT = 0, - ROCKER_OF_DPA_TABLE_ID_VLAN = 10, - ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC = 20, - ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING = 30, - ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING = 40, - ROCKER_OF_DPA_TABLE_ID_BRIDGING = 50, - ROCKER_OF_DPA_TABLE_ID_ACL_POLICY = 60, -}; - -/* OF-DPA flow stats */ -enum { - ROCKER_TLV_OF_DPA_FLOW_STAT_UNSPEC, - ROCKER_TLV_OF_DPA_FLOW_STAT_DURATION, /* u32 */ - ROCKER_TLV_OF_DPA_FLOW_STAT_RX_PKTS, /* u64 */ - ROCKER_TLV_OF_DPA_FLOW_STAT_TX_PKTS, /* u64 */ - - __ROCKER_TLV_OF_DPA_FLOW_STAT_MAX, - ROCKER_TLV_OF_DPA_FLOW_STAT_MAX = __ROCKER_TLV_OF_DPA_FLOW_STAT_MAX - 1, -}; - -/* OF-DPA group types */ -enum rocker_of_dpa_group_type { - ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE = 0, - ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE, - ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST, - ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST, - ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD, - ROCKER_OF_DPA_GROUP_TYPE_L3_INTERFACE, - ROCKER_OF_DPA_GROUP_TYPE_L3_MCAST, - ROCKER_OF_DPA_GROUP_TYPE_L3_ECMP, - ROCKER_OF_DPA_GROUP_TYPE_L2_OVERLAY, -}; - -/* OF-DPA group L2 overlay types */ -enum rocker_of_dpa_overlay_type { - ROCKER_OF_DPA_OVERLAY_TYPE_FLOOD_UCAST = 0, - ROCKER_OF_DPA_OVERLAY_TYPE_FLOOD_MCAST, - ROCKER_OF_DPA_OVERLAY_TYPE_MCAST_UCAST, - ROCKER_OF_DPA_OVERLAY_TYPE_MCAST_MCAST, -}; - -/* OF-DPA group 
ID encoding */ -#define ROCKER_GROUP_TYPE_SHIFT 28 -#define ROCKER_GROUP_TYPE_MASK 0xf0000000 -#define ROCKER_GROUP_VLAN_SHIFT 16 -#define ROCKER_GROUP_VLAN_MASK 0x0fff0000 -#define ROCKER_GROUP_PORT_SHIFT 0 -#define ROCKER_GROUP_PORT_MASK 0x0000ffff -#define ROCKER_GROUP_TUNNEL_ID_SHIFT 12 -#define ROCKER_GROUP_TUNNEL_ID_MASK 0x0ffff000 -#define ROCKER_GROUP_SUBTYPE_SHIFT 10 -#define ROCKER_GROUP_SUBTYPE_MASK 0x00000c00 -#define ROCKER_GROUP_INDEX_SHIFT 0 -#define ROCKER_GROUP_INDEX_MASK 0x0000ffff -#define ROCKER_GROUP_INDEX_LONG_SHIFT 0 -#define ROCKER_GROUP_INDEX_LONG_MASK 0x0fffffff - -#define ROCKER_GROUP_TYPE_GET(group_id) \ - (((group_id) & ROCKER_GROUP_TYPE_MASK) >> ROCKER_GROUP_TYPE_SHIFT) -#define ROCKER_GROUP_TYPE_SET(type) \ - (((type) << ROCKER_GROUP_TYPE_SHIFT) & ROCKER_GROUP_TYPE_MASK) -#define ROCKER_GROUP_VLAN_GET(group_id) \ - (((group_id) & ROCKER_GROUP_VLAN_ID_MASK) >> ROCKER_GROUP_VLAN_ID_SHIFT) -#define ROCKER_GROUP_VLAN_SET(vlan_id) \ - (((vlan_id) << ROCKER_GROUP_VLAN_SHIFT) & ROCKER_GROUP_VLAN_MASK) -#define ROCKER_GROUP_PORT_GET(group_id) \ - (((group_id) & ROCKER_GROUP_PORT_MASK) >> ROCKER_GROUP_PORT_SHIFT) -#define ROCKER_GROUP_PORT_SET(port) \ - (((port) << ROCKER_GROUP_PORT_SHIFT) & ROCKER_GROUP_PORT_MASK) -#define ROCKER_GROUP_INDEX_GET(group_id) \ - (((group_id) & ROCKER_GROUP_INDEX_MASK) >> ROCKER_GROUP_INDEX_SHIFT) -#define ROCKER_GROUP_INDEX_SET(index) \ - (((index) << ROCKER_GROUP_INDEX_SHIFT) & ROCKER_GROUP_INDEX_MASK) -#define ROCKER_GROUP_INDEX_LONG_GET(group_id) \ - (((group_id) & ROCKER_GROUP_INDEX_LONG_MASK) >> \ - ROCKER_GROUP_INDEX_LONG_SHIFT) -#define ROCKER_GROUP_INDEX_LONG_SET(index) \ - (((index) << ROCKER_GROUP_INDEX_LONG_SHIFT) & \ - ROCKER_GROUP_INDEX_LONG_MASK) - -#define ROCKER_GROUP_NONE 0 -#define ROCKER_GROUP_L2_INTERFACE(vlan_id, port) \ - (ROCKER_GROUP_TYPE_SET(ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE) |\ - ROCKER_GROUP_VLAN_SET(ntohs(vlan_id)) | ROCKER_GROUP_PORT_SET(port)) -#define ROCKER_GROUP_L2_REWRITE(index) \ - (ROCKER_GROUP_TYPE_SET(ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE) |\ - ROCKER_GROUP_INDEX_LONG_SET(index)) -#define ROCKER_GROUP_L2_MCAST(vlan_id, index) \ - (ROCKER_GROUP_TYPE_SET(ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST) |\ - ROCKER_GROUP_VLAN_SET(ntohs(vlan_id)) | ROCKER_GROUP_INDEX_SET(index)) -#define ROCKER_GROUP_L2_FLOOD(vlan_id, index) \ - (ROCKER_GROUP_TYPE_SET(ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD) |\ - ROCKER_GROUP_VLAN_SET(ntohs(vlan_id)) | ROCKER_GROUP_INDEX_SET(index)) -#define ROCKER_GROUP_L3_UNICAST(index) \ - (ROCKER_GROUP_TYPE_SET(ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST) |\ - ROCKER_GROUP_INDEX_LONG_SET(index)) - -/* Rocker general purpose registers */ -#define ROCKER_CONTROL 0x0300 -#define ROCKER_PORT_PHYS_COUNT 0x0304 -#define ROCKER_PORT_PHYS_LINK_STATUS 0x0310 /* 8-byte */ -#define ROCKER_PORT_PHYS_ENABLE 0x0318 /* 8-byte */ -#define ROCKER_SWITCH_ID 0x0320 /* 8-byte */ - -/* Rocker control bits */ -#define ROCKER_CONTROL_RESET BIT(0) +#include <linux/netdevice.h> +#include <net/neighbour.h> +#include <net/switchdev.h> + +#include "rocker_hw.h" + +struct rocker_desc_info { + char *data; /* mapped */ + size_t data_size; + size_t tlv_size; + struct rocker_desc *desc; + dma_addr_t mapaddr; +}; + +struct rocker_dma_ring_info { + size_t size; + u32 head; + u32 tail; + struct rocker_desc *desc; /* mapped */ + dma_addr_t mapaddr; + struct rocker_desc_info *desc_info; + unsigned int type; +}; + +struct rocker; + +struct rocker_port { + struct net_device *dev; + struct rocker *rocker; + void *wpriv; + unsigned int port_number; + 
u32 pport; + struct napi_struct napi_tx; + struct napi_struct napi_rx; + struct rocker_dma_ring_info tx_ring; + struct rocker_dma_ring_info rx_ring; +}; + +struct rocker_world_ops; + +struct rocker { + struct pci_dev *pdev; + u8 __iomem *hw_addr; + struct msix_entry *msix_entries; + unsigned int port_count; + struct rocker_port **ports; + struct { + u64 id; + } hw; + spinlock_t cmd_ring_lock; /* for cmd ring accesses */ + struct rocker_dma_ring_info cmd_ring; + struct rocker_dma_ring_info event_ring; + struct rocker_world_ops *wops; + void *wpriv; +}; + +typedef int (*rocker_cmd_prep_cb_t)(const struct rocker_port *rocker_port, + struct rocker_desc_info *desc_info, + void *priv); + +typedef int (*rocker_cmd_proc_cb_t)(const struct rocker_port *rocker_port, + const struct rocker_desc_info *desc_info, + void *priv); + +int rocker_cmd_exec(struct rocker_port *rocker_port, bool nowait, + rocker_cmd_prep_cb_t prepare, void *prepare_priv, + rocker_cmd_proc_cb_t process, void *process_priv); + +int rocker_port_set_learning(struct rocker_port *rocker_port, + bool learning); + +struct rocker_world_ops { + const char *kind; + size_t priv_size; + size_t port_priv_size; + u8 mode; + int (*init)(struct rocker *rocker); + void (*fini)(struct rocker *rocker); + int (*port_pre_init)(struct rocker_port *rocker_port); + int (*port_init)(struct rocker_port *rocker_port); + void (*port_fini)(struct rocker_port *rocker_port); + void (*port_post_fini)(struct rocker_port *rocker_port); + int (*port_open)(struct rocker_port *rocker_port); + void (*port_stop)(struct rocker_port *rocker_port); + int (*port_attr_stp_state_set)(struct rocker_port *rocker_port, + u8 state, + struct switchdev_trans *trans); + int (*port_attr_bridge_flags_set)(struct rocker_port *rocker_port, + unsigned long brport_flags, + struct switchdev_trans *trans); + int (*port_attr_bridge_flags_get)(const struct rocker_port *rocker_port, + unsigned long *p_brport_flags); + int (*port_attr_bridge_ageing_time_set)(struct rocker_port *rocker_port, + u32 ageing_time, + struct switchdev_trans *trans); + int (*port_obj_vlan_add)(struct rocker_port *rocker_port, + const struct switchdev_obj_port_vlan *vlan, + struct switchdev_trans *trans); + int (*port_obj_vlan_del)(struct rocker_port *rocker_port, + const struct switchdev_obj_port_vlan *vlan); + int (*port_obj_vlan_dump)(const struct rocker_port *rocker_port, + struct switchdev_obj_port_vlan *vlan, + switchdev_obj_dump_cb_t *cb); + int (*port_obj_fib4_add)(struct rocker_port *rocker_port, + const struct switchdev_obj_ipv4_fib *fib4, + struct switchdev_trans *trans); + int (*port_obj_fib4_del)(struct rocker_port *rocker_port, + const struct switchdev_obj_ipv4_fib *fib4); + int (*port_obj_fdb_add)(struct rocker_port *rocker_port, + const struct switchdev_obj_port_fdb *fdb, + struct switchdev_trans *trans); + int (*port_obj_fdb_del)(struct rocker_port *rocker_port, + const struct switchdev_obj_port_fdb *fdb); + int (*port_obj_fdb_dump)(const struct rocker_port *rocker_port, + struct switchdev_obj_port_fdb *fdb, + switchdev_obj_dump_cb_t *cb); + int (*port_master_linked)(struct rocker_port *rocker_port, + struct net_device *master); + int (*port_master_unlinked)(struct rocker_port *rocker_port, + struct net_device *master); + int (*port_neigh_update)(struct rocker_port *rocker_port, + struct neighbour *n); + int (*port_neigh_destroy)(struct rocker_port *rocker_port, + struct neighbour *n); + int (*port_ev_mac_vlan_seen)(struct rocker_port *rocker_port, + const unsigned char *addr, + __be16 vlan_id); +}; 
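The rocker_world_ops table that closes above is the point of this refactor: the core keeps the PCI, DMA-ring, and netdev plumbing, and forwards every switchdev and bridge operation through these callbacks to a pluggable "world" backend (OF-DPA is the only world wired up, via the rocker_ofdpa_ops extern that follows). priv_size and port_priv_size let the core allocate each backend's private state (the wpriv pointers) without knowing its layout. Below is a minimal stand-alone C sketch of that vtable-plus-opaque-state pattern; every name in it is illustrative only, none comes from the driver.

/* Hypothetical user-space illustration of the dispatch pattern:
 * a vtable plus an opaque private area whose size the backend
 * declares and the core allocates. Not driver code.
 */
#include <stdio.h>
#include <stdlib.h>

struct world_ops {
	const char *kind;
	size_t priv_size;	/* core allocates this much for the backend */
	int (*init)(void *priv);
	int (*port_open)(void *priv, unsigned int pport);
};

struct ofdpa_priv {		/* layout known only to the backend */
	unsigned int flows;
};

static int ofdpa_init(void *priv)
{
	struct ofdpa_priv *p = priv;

	p->flows = 0;
	return 0;
}

static int ofdpa_port_open(void *priv, unsigned int pport)
{
	struct ofdpa_priv *p = priv;

	printf("of-dpa: port %u open, %u flows\n", pport, p->flows);
	return 0;
}

static const struct world_ops ofdpa_ops = {
	.kind = "of-dpa",
	.priv_size = sizeof(struct ofdpa_priv),
	.init = ofdpa_init,
	.port_open = ofdpa_port_open,
};

int main(void)
{
	const struct world_ops *wops = &ofdpa_ops; /* picked by port mode */
	void *wpriv = calloc(1, wops->priv_size);  /* core-owned, backend-shaped */
	int err;

	if (!wpriv || wops->init(wpriv))
		return 1;
	err = wops->port_open(wpriv, 1);
	free(wpriv);
	return err;
}

With this shape, the only cross-reference the core needs is the rocker_ofdpa_ops extern below; everything else about OF-DPA stays behind the function pointers.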
+ +extern struct rocker_world_ops rocker_ofdpa_ops; #endif diff --git a/drivers/net/ethernet/rocker/rocker_hw.h b/drivers/net/ethernet/rocker/rocker_hw.h new file mode 100644 index 000000000000..2adfe88859f2 --- /dev/null +++ b/drivers/net/ethernet/rocker/rocker_hw.h @@ -0,0 +1,467 @@ +/* + * drivers/net/ethernet/rocker/rocker_hw.h - Rocker switch device driver + * Copyright (c) 2014-2016 Jiri Pirko <jiri@mellanox.com> + * Copyright (c) 2014 Scott Feldman <sfeldma@gmail.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef _ROCKER_HW_H +#define _ROCKER_HW_H + +#include <linux/types.h> + +/* Return codes */ +enum { + ROCKER_OK = 0, + ROCKER_ENOENT = 2, + ROCKER_ENXIO = 6, + ROCKER_ENOMEM = 12, + ROCKER_EEXIST = 17, + ROCKER_EINVAL = 22, + ROCKER_EMSGSIZE = 90, + ROCKER_ENOTSUP = 95, + ROCKER_ENOBUFS = 105, +}; + +#define ROCKER_FP_PORTS_MAX 62 + +#define PCI_VENDOR_ID_REDHAT 0x1b36 +#define PCI_DEVICE_ID_REDHAT_ROCKER 0x0006 + +#define ROCKER_PCI_BAR0_SIZE 0x2000 + +/* MSI-X vectors */ +enum { + ROCKER_MSIX_VEC_CMD, + ROCKER_MSIX_VEC_EVENT, + ROCKER_MSIX_VEC_TEST, + ROCKER_MSIX_VEC_RESERVED0, + __ROCKER_MSIX_VEC_TX, + __ROCKER_MSIX_VEC_RX, +#define ROCKER_MSIX_VEC_TX(port) \ + (__ROCKER_MSIX_VEC_TX + ((port) * 2)) +#define ROCKER_MSIX_VEC_RX(port) \ + (__ROCKER_MSIX_VEC_RX + ((port) * 2)) +#define ROCKER_MSIX_VEC_COUNT(portcnt) \ + (ROCKER_MSIX_VEC_RX((portcnt - 1)) + 1) +}; + +/* Rocker bogus registers */ +#define ROCKER_BOGUS_REG0 0x0000 +#define ROCKER_BOGUS_REG1 0x0004 +#define ROCKER_BOGUS_REG2 0x0008 +#define ROCKER_BOGUS_REG3 0x000c + +/* Rocker test registers */ +#define ROCKER_TEST_REG 0x0010 +#define ROCKER_TEST_REG64 0x0018 /* 8-byte */ +#define ROCKER_TEST_IRQ 0x0020 +#define ROCKER_TEST_DMA_ADDR 0x0028 /* 8-byte */ +#define ROCKER_TEST_DMA_SIZE 0x0030 +#define ROCKER_TEST_DMA_CTRL 0x0034 + +/* Rocker test register ctrl */ +#define ROCKER_TEST_DMA_CTRL_CLEAR BIT(0) +#define ROCKER_TEST_DMA_CTRL_FILL BIT(1) +#define ROCKER_TEST_DMA_CTRL_INVERT BIT(2) + +/* Rocker DMA ring register offsets */ +#define ROCKER_DMA_DESC_ADDR(x) (0x1000 + (x) * 32) /* 8-byte */ +#define ROCKER_DMA_DESC_SIZE(x) (0x1008 + (x) * 32) +#define ROCKER_DMA_DESC_HEAD(x) (0x100c + (x) * 32) +#define ROCKER_DMA_DESC_TAIL(x) (0x1010 + (x) * 32) +#define ROCKER_DMA_DESC_CTRL(x) (0x1014 + (x) * 32) +#define ROCKER_DMA_DESC_CREDITS(x) (0x1018 + (x) * 32) +#define ROCKER_DMA_DESC_RES1(x) (0x101c + (x) * 32) + +/* Rocker dma ctrl register bits */ +#define ROCKER_DMA_DESC_CTRL_RESET BIT(0) + +/* Rocker DMA ring types */ +enum rocker_dma_type { + ROCKER_DMA_CMD, + ROCKER_DMA_EVENT, + __ROCKER_DMA_TX, + __ROCKER_DMA_RX, +#define ROCKER_DMA_TX(port) (__ROCKER_DMA_TX + (port) * 2) +#define ROCKER_DMA_RX(port) (__ROCKER_DMA_RX + (port) * 2) +}; + +/* Rocker DMA ring size limits and default sizes */ +#define ROCKER_DMA_SIZE_MIN 2ul +#define ROCKER_DMA_SIZE_MAX 65536ul +#define ROCKER_DMA_CMD_DEFAULT_SIZE 32ul +#define ROCKER_DMA_EVENT_DEFAULT_SIZE 32ul +#define ROCKER_DMA_TX_DEFAULT_SIZE 64ul +#define ROCKER_DMA_TX_DESC_SIZE 256 +#define ROCKER_DMA_RX_DEFAULT_SIZE 64ul +#define ROCKER_DMA_RX_DESC_SIZE 256 + +/* Rocker DMA descriptor struct */ +struct rocker_desc { + u64 buf_addr; + u64 cookie; + u16 buf_size; + u16 tlv_size; + u16 resv[5]; + u16 comp_err; +}; + +#define ROCKER_DMA_DESC_COMP_ERR_GEN 
BIT(15) + +/* Rocker DMA TLV struct */ +struct rocker_tlv { + u32 type; + u16 len; +}; + +/* TLVs */ +enum { + ROCKER_TLV_CMD_UNSPEC, + ROCKER_TLV_CMD_TYPE, /* u16 */ + ROCKER_TLV_CMD_INFO, /* nest */ + + __ROCKER_TLV_CMD_MAX, + ROCKER_TLV_CMD_MAX = __ROCKER_TLV_CMD_MAX - 1, +}; + +enum { + ROCKER_TLV_CMD_TYPE_UNSPEC, + ROCKER_TLV_CMD_TYPE_GET_PORT_SETTINGS, + ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS, + ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD, + ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD, + ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL, + ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_GET_STATS, + ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD, + ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD, + ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL, + ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_GET_STATS, + + ROCKER_TLV_CMD_TYPE_CLEAR_PORT_STATS, + ROCKER_TLV_CMD_TYPE_GET_PORT_STATS, + + __ROCKER_TLV_CMD_TYPE_MAX, + ROCKER_TLV_CMD_TYPE_MAX = __ROCKER_TLV_CMD_TYPE_MAX - 1, +}; + +enum { + ROCKER_TLV_CMD_PORT_SETTINGS_UNSPEC, + ROCKER_TLV_CMD_PORT_SETTINGS_PPORT, /* u32 */ + ROCKER_TLV_CMD_PORT_SETTINGS_SPEED, /* u32 */ + ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX, /* u8 */ + ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG, /* u8 */ + ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR, /* binary */ + ROCKER_TLV_CMD_PORT_SETTINGS_MODE, /* u8 */ + ROCKER_TLV_CMD_PORT_SETTINGS_LEARNING, /* u8 */ + ROCKER_TLV_CMD_PORT_SETTINGS_PHYS_NAME, /* binary */ + ROCKER_TLV_CMD_PORT_SETTINGS_MTU, /* u16 */ + + __ROCKER_TLV_CMD_PORT_SETTINGS_MAX, + ROCKER_TLV_CMD_PORT_SETTINGS_MAX = + __ROCKER_TLV_CMD_PORT_SETTINGS_MAX - 1, +}; + +enum { + ROCKER_TLV_CMD_PORT_STATS_UNSPEC, + ROCKER_TLV_CMD_PORT_STATS_PPORT, /* u32 */ + + ROCKER_TLV_CMD_PORT_STATS_RX_PKTS, /* u64 */ + ROCKER_TLV_CMD_PORT_STATS_RX_BYTES, /* u64 */ + ROCKER_TLV_CMD_PORT_STATS_RX_DROPPED, /* u64 */ + ROCKER_TLV_CMD_PORT_STATS_RX_ERRORS, /* u64 */ + + ROCKER_TLV_CMD_PORT_STATS_TX_PKTS, /* u64 */ + ROCKER_TLV_CMD_PORT_STATS_TX_BYTES, /* u64 */ + ROCKER_TLV_CMD_PORT_STATS_TX_DROPPED, /* u64 */ + ROCKER_TLV_CMD_PORT_STATS_TX_ERRORS, /* u64 */ + + __ROCKER_TLV_CMD_PORT_STATS_MAX, + ROCKER_TLV_CMD_PORT_STATS_MAX = __ROCKER_TLV_CMD_PORT_STATS_MAX - 1, +}; + +enum rocker_port_mode { + ROCKER_PORT_MODE_OF_DPA, +}; + +enum { + ROCKER_TLV_EVENT_UNSPEC, + ROCKER_TLV_EVENT_TYPE, /* u16 */ + ROCKER_TLV_EVENT_INFO, /* nest */ + + __ROCKER_TLV_EVENT_MAX, + ROCKER_TLV_EVENT_MAX = __ROCKER_TLV_EVENT_MAX - 1, +}; + +enum { + ROCKER_TLV_EVENT_TYPE_UNSPEC, + ROCKER_TLV_EVENT_TYPE_LINK_CHANGED, + ROCKER_TLV_EVENT_TYPE_MAC_VLAN_SEEN, + + __ROCKER_TLV_EVENT_TYPE_MAX, + ROCKER_TLV_EVENT_TYPE_MAX = __ROCKER_TLV_EVENT_TYPE_MAX - 1, +}; + +enum { + ROCKER_TLV_EVENT_LINK_CHANGED_UNSPEC, + ROCKER_TLV_EVENT_LINK_CHANGED_PPORT, /* u32 */ + ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP, /* u8 */ + + __ROCKER_TLV_EVENT_LINK_CHANGED_MAX, + ROCKER_TLV_EVENT_LINK_CHANGED_MAX = + __ROCKER_TLV_EVENT_LINK_CHANGED_MAX - 1, +}; + +enum { + ROCKER_TLV_EVENT_MAC_VLAN_UNSPEC, + ROCKER_TLV_EVENT_MAC_VLAN_PPORT, /* u32 */ + ROCKER_TLV_EVENT_MAC_VLAN_MAC, /* binary */ + ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID, /* __be16 */ + + __ROCKER_TLV_EVENT_MAC_VLAN_MAX, + ROCKER_TLV_EVENT_MAC_VLAN_MAX = __ROCKER_TLV_EVENT_MAC_VLAN_MAX - 1, +}; + +enum { + ROCKER_TLV_RX_UNSPEC, + ROCKER_TLV_RX_FLAGS, /* u16, see ROCKER_RX_FLAGS_ */ + ROCKER_TLV_RX_CSUM, /* u16 */ + ROCKER_TLV_RX_FRAG_ADDR, /* u64 */ + ROCKER_TLV_RX_FRAG_MAX_LEN, /* u16 */ + ROCKER_TLV_RX_FRAG_LEN, /* u16 */ + + __ROCKER_TLV_RX_MAX, + ROCKER_TLV_RX_MAX = __ROCKER_TLV_RX_MAX - 1, +}; + +#define ROCKER_RX_FLAGS_IPV4 BIT(0) +#define ROCKER_RX_FLAGS_IPV6 BIT(1) +#define 
ROCKER_RX_FLAGS_CSUM_CALC BIT(2) +#define ROCKER_RX_FLAGS_IPV4_CSUM_GOOD BIT(3) +#define ROCKER_RX_FLAGS_IP_FRAG BIT(4) +#define ROCKER_RX_FLAGS_TCP BIT(5) +#define ROCKER_RX_FLAGS_UDP BIT(6) +#define ROCKER_RX_FLAGS_TCP_UDP_CSUM_GOOD BIT(7) +#define ROCKER_RX_FLAGS_FWD_OFFLOAD BIT(8) + +enum { + ROCKER_TLV_TX_UNSPEC, + ROCKER_TLV_TX_OFFLOAD, /* u8, see ROCKER_TX_OFFLOAD_ */ + ROCKER_TLV_TX_L3_CSUM_OFF, /* u16 */ + ROCKER_TLV_TX_TSO_MSS, /* u16 */ + ROCKER_TLV_TX_TSO_HDR_LEN, /* u16 */ + ROCKER_TLV_TX_FRAGS, /* array */ + + __ROCKER_TLV_TX_MAX, + ROCKER_TLV_TX_MAX = __ROCKER_TLV_TX_MAX - 1, +}; + +#define ROCKER_TX_OFFLOAD_NONE 0 +#define ROCKER_TX_OFFLOAD_IP_CSUM 1 +#define ROCKER_TX_OFFLOAD_TCP_UDP_CSUM 2 +#define ROCKER_TX_OFFLOAD_L3_CSUM 3 +#define ROCKER_TX_OFFLOAD_TSO 4 + +#define ROCKER_TX_FRAGS_MAX 16 + +enum { + ROCKER_TLV_TX_FRAG_UNSPEC, + ROCKER_TLV_TX_FRAG, /* nest */ + + __ROCKER_TLV_TX_FRAG_MAX, + ROCKER_TLV_TX_FRAG_MAX = __ROCKER_TLV_TX_FRAG_MAX - 1, +}; + +enum { + ROCKER_TLV_TX_FRAG_ATTR_UNSPEC, + ROCKER_TLV_TX_FRAG_ATTR_ADDR, /* u64 */ + ROCKER_TLV_TX_FRAG_ATTR_LEN, /* u16 */ + + __ROCKER_TLV_TX_FRAG_ATTR_MAX, + ROCKER_TLV_TX_FRAG_ATTR_MAX = __ROCKER_TLV_TX_FRAG_ATTR_MAX - 1, +}; + +/* cmd info nested for OF-DPA msgs */ +enum { + ROCKER_TLV_OF_DPA_UNSPEC, + ROCKER_TLV_OF_DPA_TABLE_ID, /* u16 */ + ROCKER_TLV_OF_DPA_PRIORITY, /* u32 */ + ROCKER_TLV_OF_DPA_HARDTIME, /* u32 */ + ROCKER_TLV_OF_DPA_IDLETIME, /* u32 */ + ROCKER_TLV_OF_DPA_COOKIE, /* u64 */ + ROCKER_TLV_OF_DPA_IN_PPORT, /* u32 */ + ROCKER_TLV_OF_DPA_IN_PPORT_MASK, /* u32 */ + ROCKER_TLV_OF_DPA_OUT_PPORT, /* u32 */ + ROCKER_TLV_OF_DPA_GOTO_TABLE_ID, /* u16 */ + ROCKER_TLV_OF_DPA_GROUP_ID, /* u32 */ + ROCKER_TLV_OF_DPA_GROUP_ID_LOWER, /* u32 */ + ROCKER_TLV_OF_DPA_GROUP_COUNT, /* u16 */ + ROCKER_TLV_OF_DPA_GROUP_IDS, /* u32 array */ + ROCKER_TLV_OF_DPA_VLAN_ID, /* __be16 */ + ROCKER_TLV_OF_DPA_VLAN_ID_MASK, /* __be16 */ + ROCKER_TLV_OF_DPA_VLAN_PCP, /* __be16 */ + ROCKER_TLV_OF_DPA_VLAN_PCP_MASK, /* __be16 */ + ROCKER_TLV_OF_DPA_VLAN_PCP_ACTION, /* u8 */ + ROCKER_TLV_OF_DPA_NEW_VLAN_ID, /* __be16 */ + ROCKER_TLV_OF_DPA_NEW_VLAN_PCP, /* u8 */ + ROCKER_TLV_OF_DPA_TUNNEL_ID, /* u32 */ + ROCKER_TLV_OF_DPA_TUNNEL_LPORT, /* u32 */ + ROCKER_TLV_OF_DPA_ETHERTYPE, /* __be16 */ + ROCKER_TLV_OF_DPA_DST_MAC, /* binary */ + ROCKER_TLV_OF_DPA_DST_MAC_MASK, /* binary */ + ROCKER_TLV_OF_DPA_SRC_MAC, /* binary */ + ROCKER_TLV_OF_DPA_SRC_MAC_MASK, /* binary */ + ROCKER_TLV_OF_DPA_IP_PROTO, /* u8 */ + ROCKER_TLV_OF_DPA_IP_PROTO_MASK, /* u8 */ + ROCKER_TLV_OF_DPA_IP_DSCP, /* u8 */ + ROCKER_TLV_OF_DPA_IP_DSCP_MASK, /* u8 */ + ROCKER_TLV_OF_DPA_IP_DSCP_ACTION, /* u8 */ + ROCKER_TLV_OF_DPA_NEW_IP_DSCP, /* u8 */ + ROCKER_TLV_OF_DPA_IP_ECN, /* u8 */ + ROCKER_TLV_OF_DPA_IP_ECN_MASK, /* u8 */ + ROCKER_TLV_OF_DPA_DST_IP, /* __be32 */ + ROCKER_TLV_OF_DPA_DST_IP_MASK, /* __be32 */ + ROCKER_TLV_OF_DPA_SRC_IP, /* __be32 */ + ROCKER_TLV_OF_DPA_SRC_IP_MASK, /* __be32 */ + ROCKER_TLV_OF_DPA_DST_IPV6, /* binary */ + ROCKER_TLV_OF_DPA_DST_IPV6_MASK, /* binary */ + ROCKER_TLV_OF_DPA_SRC_IPV6, /* binary */ + ROCKER_TLV_OF_DPA_SRC_IPV6_MASK, /* binary */ + ROCKER_TLV_OF_DPA_SRC_ARP_IP, /* __be32 */ + ROCKER_TLV_OF_DPA_SRC_ARP_IP_MASK, /* __be32 */ + ROCKER_TLV_OF_DPA_L4_DST_PORT, /* __be16 */ + ROCKER_TLV_OF_DPA_L4_DST_PORT_MASK, /* __be16 */ + ROCKER_TLV_OF_DPA_L4_SRC_PORT, /* __be16 */ + ROCKER_TLV_OF_DPA_L4_SRC_PORT_MASK, /* __be16 */ + ROCKER_TLV_OF_DPA_ICMP_TYPE, /* u8 */ + ROCKER_TLV_OF_DPA_ICMP_TYPE_MASK, /* u8 */ + ROCKER_TLV_OF_DPA_ICMP_CODE, /* 
u8 */ + ROCKER_TLV_OF_DPA_ICMP_CODE_MASK, /* u8 */ + ROCKER_TLV_OF_DPA_IPV6_LABEL, /* __be32 */ + ROCKER_TLV_OF_DPA_IPV6_LABEL_MASK, /* __be32 */ + ROCKER_TLV_OF_DPA_QUEUE_ID_ACTION, /* u8 */ + ROCKER_TLV_OF_DPA_NEW_QUEUE_ID, /* u8 */ + ROCKER_TLV_OF_DPA_CLEAR_ACTIONS, /* u32 */ + ROCKER_TLV_OF_DPA_POP_VLAN, /* u8 */ + ROCKER_TLV_OF_DPA_TTL_CHECK, /* u8 */ + ROCKER_TLV_OF_DPA_COPY_CPU_ACTION, /* u8 */ + + __ROCKER_TLV_OF_DPA_MAX, + ROCKER_TLV_OF_DPA_MAX = __ROCKER_TLV_OF_DPA_MAX - 1, +}; + +/* OF-DPA table IDs */ + +enum rocker_of_dpa_table_id { + ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT = 0, + ROCKER_OF_DPA_TABLE_ID_VLAN = 10, + ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC = 20, + ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING = 30, + ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING = 40, + ROCKER_OF_DPA_TABLE_ID_BRIDGING = 50, + ROCKER_OF_DPA_TABLE_ID_ACL_POLICY = 60, +}; + +/* OF-DPA flow stats */ +enum { + ROCKER_TLV_OF_DPA_FLOW_STAT_UNSPEC, + ROCKER_TLV_OF_DPA_FLOW_STAT_DURATION, /* u32 */ + ROCKER_TLV_OF_DPA_FLOW_STAT_RX_PKTS, /* u64 */ + ROCKER_TLV_OF_DPA_FLOW_STAT_TX_PKTS, /* u64 */ + + __ROCKER_TLV_OF_DPA_FLOW_STAT_MAX, + ROCKER_TLV_OF_DPA_FLOW_STAT_MAX = __ROCKER_TLV_OF_DPA_FLOW_STAT_MAX - 1, +}; + +/* OF-DPA group types */ +enum rocker_of_dpa_group_type { + ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE = 0, + ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE, + ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST, + ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST, + ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD, + ROCKER_OF_DPA_GROUP_TYPE_L3_INTERFACE, + ROCKER_OF_DPA_GROUP_TYPE_L3_MCAST, + ROCKER_OF_DPA_GROUP_TYPE_L3_ECMP, + ROCKER_OF_DPA_GROUP_TYPE_L2_OVERLAY, +}; + +/* OF-DPA group L2 overlay types */ +enum rocker_of_dpa_overlay_type { + ROCKER_OF_DPA_OVERLAY_TYPE_FLOOD_UCAST = 0, + ROCKER_OF_DPA_OVERLAY_TYPE_FLOOD_MCAST, + ROCKER_OF_DPA_OVERLAY_TYPE_MCAST_UCAST, + ROCKER_OF_DPA_OVERLAY_TYPE_MCAST_MCAST, +}; + +/* OF-DPA group ID encoding */ +#define ROCKER_GROUP_TYPE_SHIFT 28 +#define ROCKER_GROUP_TYPE_MASK 0xf0000000 +#define ROCKER_GROUP_VLAN_SHIFT 16 +#define ROCKER_GROUP_VLAN_MASK 0x0fff0000 +#define ROCKER_GROUP_PORT_SHIFT 0 +#define ROCKER_GROUP_PORT_MASK 0x0000ffff +#define ROCKER_GROUP_TUNNEL_ID_SHIFT 12 +#define ROCKER_GROUP_TUNNEL_ID_MASK 0x0ffff000 +#define ROCKER_GROUP_SUBTYPE_SHIFT 10 +#define ROCKER_GROUP_SUBTYPE_MASK 0x00000c00 +#define ROCKER_GROUP_INDEX_SHIFT 0 +#define ROCKER_GROUP_INDEX_MASK 0x0000ffff +#define ROCKER_GROUP_INDEX_LONG_SHIFT 0 +#define ROCKER_GROUP_INDEX_LONG_MASK 0x0fffffff + +#define ROCKER_GROUP_TYPE_GET(group_id) \ + (((group_id) & ROCKER_GROUP_TYPE_MASK) >> ROCKER_GROUP_TYPE_SHIFT) +#define ROCKER_GROUP_TYPE_SET(type) \ + (((type) << ROCKER_GROUP_TYPE_SHIFT) & ROCKER_GROUP_TYPE_MASK) +#define ROCKER_GROUP_VLAN_GET(group_id) \ + (((group_id) & ROCKER_GROUP_VLAN_ID_MASK) >> ROCKER_GROUP_VLAN_ID_SHIFT) +#define ROCKER_GROUP_VLAN_SET(vlan_id) \ + (((vlan_id) << ROCKER_GROUP_VLAN_SHIFT) & ROCKER_GROUP_VLAN_MASK) +#define ROCKER_GROUP_PORT_GET(group_id) \ + (((group_id) & ROCKER_GROUP_PORT_MASK) >> ROCKER_GROUP_PORT_SHIFT) +#define ROCKER_GROUP_PORT_SET(port) \ + (((port) << ROCKER_GROUP_PORT_SHIFT) & ROCKER_GROUP_PORT_MASK) +#define ROCKER_GROUP_INDEX_GET(group_id) \ + (((group_id) & ROCKER_GROUP_INDEX_MASK) >> ROCKER_GROUP_INDEX_SHIFT) +#define ROCKER_GROUP_INDEX_SET(index) \ + (((index) << ROCKER_GROUP_INDEX_SHIFT) & ROCKER_GROUP_INDEX_MASK) +#define ROCKER_GROUP_INDEX_LONG_GET(group_id) \ + (((group_id) & ROCKER_GROUP_INDEX_LONG_MASK) >> \ + ROCKER_GROUP_INDEX_LONG_SHIFT) +#define ROCKER_GROUP_INDEX_LONG_SET(index) \ + (((index) 
<< ROCKER_GROUP_INDEX_LONG_SHIFT) & \ + ROCKER_GROUP_INDEX_LONG_MASK) + +#define ROCKER_GROUP_NONE 0 +#define ROCKER_GROUP_L2_INTERFACE(vlan_id, port) \ + (ROCKER_GROUP_TYPE_SET(ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE) |\ + ROCKER_GROUP_VLAN_SET(ntohs(vlan_id)) | ROCKER_GROUP_PORT_SET(port)) +#define ROCKER_GROUP_L2_REWRITE(index) \ + (ROCKER_GROUP_TYPE_SET(ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE) |\ + ROCKER_GROUP_INDEX_LONG_SET(index)) +#define ROCKER_GROUP_L2_MCAST(vlan_id, index) \ + (ROCKER_GROUP_TYPE_SET(ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST) |\ + ROCKER_GROUP_VLAN_SET(ntohs(vlan_id)) | ROCKER_GROUP_INDEX_SET(index)) +#define ROCKER_GROUP_L2_FLOOD(vlan_id, index) \ + (ROCKER_GROUP_TYPE_SET(ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD) |\ + ROCKER_GROUP_VLAN_SET(ntohs(vlan_id)) | ROCKER_GROUP_INDEX_SET(index)) +#define ROCKER_GROUP_L3_UNICAST(index) \ + (ROCKER_GROUP_TYPE_SET(ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST) |\ + ROCKER_GROUP_INDEX_LONG_SET(index)) + +/* Rocker general purpose registers */ +#define ROCKER_CONTROL 0x0300 +#define ROCKER_PORT_PHYS_COUNT 0x0304 +#define ROCKER_PORT_PHYS_LINK_STATUS 0x0310 /* 8-byte */ +#define ROCKER_PORT_PHYS_ENABLE 0x0318 /* 8-byte */ +#define ROCKER_SWITCH_ID 0x0320 /* 8-byte */ + +/* Rocker control bits */ +#define ROCKER_CONTROL_RESET BIT(0) + +#endif diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c new file mode 100644 index 000000000000..acafbf870182 --- /dev/null +++ b/drivers/net/ethernet/rocker/rocker_main.c @@ -0,0 +1,2910 @@ +/* + * drivers/net/ethernet/rocker/rocker.c - Rocker switch device driver + * Copyright (c) 2014-2016 Jiri Pirko <jiri@mellanox.com> + * Copyright (c) 2014 Scott Feldman <sfeldma@gmail.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/interrupt.h> +#include <linux/sched.h> +#include <linux/wait.h> +#include <linux/spinlock.h> +#include <linux/sort.h> +#include <linux/random.h> +#include <linux/netdevice.h> +#include <linux/skbuff.h> +#include <linux/socket.h> +#include <linux/etherdevice.h> +#include <linux/ethtool.h> +#include <linux/if_ether.h> +#include <linux/if_vlan.h> +#include <linux/if_bridge.h> +#include <linux/bitops.h> +#include <linux/ctype.h> +#include <net/switchdev.h> +#include <net/rtnetlink.h> +#include <net/netevent.h> +#include <net/arp.h> +#include <linux/io-64-nonatomic-lo-hi.h> +#include <generated/utsrelease.h> + +#include "rocker_hw.h" +#include "rocker.h" +#include "rocker_tlv.h" + +static const char rocker_driver_name[] = "rocker"; + +static const struct pci_device_id rocker_pci_id_table[] = { + {PCI_VDEVICE(REDHAT, PCI_DEVICE_ID_REDHAT_ROCKER), 0}, + {0, } +}; + +struct rocker_wait { + wait_queue_head_t wait; + bool done; + bool nowait; +}; + +static void rocker_wait_reset(struct rocker_wait *wait) +{ + wait->done = false; + wait->nowait = false; +} + +static void rocker_wait_init(struct rocker_wait *wait) +{ + init_waitqueue_head(&wait->wait); + rocker_wait_reset(wait); +} + +static struct rocker_wait *rocker_wait_create(void) +{ + struct rocker_wait *wait; + + wait = kzalloc(sizeof(*wait), GFP_KERNEL); + if (!wait) + return NULL; + return wait; +} + +static void rocker_wait_destroy(struct rocker_wait *wait) +{ + kfree(wait); +} + +static bool rocker_wait_event_timeout(struct rocker_wait *wait, + unsigned long timeout) +{ + wait_event_timeout(wait->wait, wait->done, timeout); + if (!wait->done) + return false; + return true; +} + +static void rocker_wait_wake_up(struct rocker_wait *wait) +{ + wait->done = true; + wake_up(&wait->wait); +} + +static u32 rocker_msix_vector(const struct rocker *rocker, unsigned int vector) +{ + return rocker->msix_entries[vector].vector; +} + +static u32 rocker_msix_tx_vector(const struct rocker_port *rocker_port) +{ + return rocker_msix_vector(rocker_port->rocker, + ROCKER_MSIX_VEC_TX(rocker_port->port_number)); +} + +static u32 rocker_msix_rx_vector(const struct rocker_port *rocker_port) +{ + return rocker_msix_vector(rocker_port->rocker, + ROCKER_MSIX_VEC_RX(rocker_port->port_number)); +} + +#define rocker_write32(rocker, reg, val) \ + writel((val), (rocker)->hw_addr + (ROCKER_ ## reg)) +#define rocker_read32(rocker, reg) \ + readl((rocker)->hw_addr + (ROCKER_ ## reg)) +#define rocker_write64(rocker, reg, val) \ + writeq((val), (rocker)->hw_addr + (ROCKER_ ## reg)) +#define rocker_read64(rocker, reg) \ + readq((rocker)->hw_addr + (ROCKER_ ## reg)) + +/***************************** + * HW basic testing functions + *****************************/ + +static int rocker_reg_test(const struct rocker *rocker) +{ + const struct pci_dev *pdev = rocker->pdev; + u64 test_reg; + u64 rnd; + + rnd = prandom_u32(); + rnd >>= 1; + rocker_write32(rocker, TEST_REG, rnd); + test_reg = rocker_read32(rocker, TEST_REG); + if (test_reg != rnd * 2) { + dev_err(&pdev->dev, "unexpected 32bit register value %08llx, expected %08llx\n", + test_reg, rnd * 2); + return -EIO; + } + + rnd = prandom_u32(); + rnd <<= 31; + rnd |= prandom_u32(); + rocker_write64(rocker, TEST_REG64, rnd); + test_reg = rocker_read64(rocker, TEST_REG64); + if (test_reg != rnd * 2) { + dev_err(&pdev->dev, "unexpected 64bit register value %16llx, expected %16llx\n", + test_reg, rnd * 2); + return -EIO; + } + + return 
0; +} + +static int rocker_dma_test_one(const struct rocker *rocker, + struct rocker_wait *wait, u32 test_type, + dma_addr_t dma_handle, const unsigned char *buf, + const unsigned char *expect, size_t size) +{ + const struct pci_dev *pdev = rocker->pdev; + int i; + + rocker_wait_reset(wait); + rocker_write32(rocker, TEST_DMA_CTRL, test_type); + + if (!rocker_wait_event_timeout(wait, HZ / 10)) { + dev_err(&pdev->dev, "no interrupt received within a timeout\n"); + return -EIO; + } + + for (i = 0; i < size; i++) { + if (buf[i] != expect[i]) { + dev_err(&pdev->dev, "unexpected memory content %02x at byte %x\n, %02x expected", + buf[i], i, expect[i]); + return -EIO; + } + } + return 0; +} + +#define ROCKER_TEST_DMA_BUF_SIZE (PAGE_SIZE * 4) +#define ROCKER_TEST_DMA_FILL_PATTERN 0x96 + +static int rocker_dma_test_offset(const struct rocker *rocker, + struct rocker_wait *wait, int offset) +{ + struct pci_dev *pdev = rocker->pdev; + unsigned char *alloc; + unsigned char *buf; + unsigned char *expect; + dma_addr_t dma_handle; + int i; + int err; + + alloc = kzalloc(ROCKER_TEST_DMA_BUF_SIZE * 2 + offset, + GFP_KERNEL | GFP_DMA); + if (!alloc) + return -ENOMEM; + buf = alloc + offset; + expect = buf + ROCKER_TEST_DMA_BUF_SIZE; + + dma_handle = pci_map_single(pdev, buf, ROCKER_TEST_DMA_BUF_SIZE, + PCI_DMA_BIDIRECTIONAL); + if (pci_dma_mapping_error(pdev, dma_handle)) { + err = -EIO; + goto free_alloc; + } + + rocker_write64(rocker, TEST_DMA_ADDR, dma_handle); + rocker_write32(rocker, TEST_DMA_SIZE, ROCKER_TEST_DMA_BUF_SIZE); + + memset(expect, ROCKER_TEST_DMA_FILL_PATTERN, ROCKER_TEST_DMA_BUF_SIZE); + err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_FILL, + dma_handle, buf, expect, + ROCKER_TEST_DMA_BUF_SIZE); + if (err) + goto unmap; + + memset(expect, 0, ROCKER_TEST_DMA_BUF_SIZE); + err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_CLEAR, + dma_handle, buf, expect, + ROCKER_TEST_DMA_BUF_SIZE); + if (err) + goto unmap; + + prandom_bytes(buf, ROCKER_TEST_DMA_BUF_SIZE); + for (i = 0; i < ROCKER_TEST_DMA_BUF_SIZE; i++) + expect[i] = ~buf[i]; + err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_INVERT, + dma_handle, buf, expect, + ROCKER_TEST_DMA_BUF_SIZE); + if (err) + goto unmap; + +unmap: + pci_unmap_single(pdev, dma_handle, ROCKER_TEST_DMA_BUF_SIZE, + PCI_DMA_BIDIRECTIONAL); +free_alloc: + kfree(alloc); + + return err; +} + +static int rocker_dma_test(const struct rocker *rocker, + struct rocker_wait *wait) +{ + int i; + int err; + + for (i = 0; i < 8; i++) { + err = rocker_dma_test_offset(rocker, wait, i); + if (err) + return err; + } + return 0; +} + +static irqreturn_t rocker_test_irq_handler(int irq, void *dev_id) +{ + struct rocker_wait *wait = dev_id; + + rocker_wait_wake_up(wait); + + return IRQ_HANDLED; +} + +static int rocker_basic_hw_test(const struct rocker *rocker) +{ + const struct pci_dev *pdev = rocker->pdev; + struct rocker_wait wait; + int err; + + err = rocker_reg_test(rocker); + if (err) { + dev_err(&pdev->dev, "reg test failed\n"); + return err; + } + + err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_TEST), + rocker_test_irq_handler, 0, + rocker_driver_name, &wait); + if (err) { + dev_err(&pdev->dev, "cannot assign test irq\n"); + return err; + } + + rocker_wait_init(&wait); + rocker_write32(rocker, TEST_IRQ, ROCKER_MSIX_VEC_TEST); + + if (!rocker_wait_event_timeout(&wait, HZ / 10)) { + dev_err(&pdev->dev, "no interrupt received within a timeout\n"); + err = -EIO; + goto free_irq; + } + + err = rocker_dma_test(rocker, &wait); + if (err) 
+ dev_err(&pdev->dev, "dma test failed\n"); + +free_irq: + free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_TEST), &wait); + return err; +} + +/****************************************** + * DMA rings and descriptors manipulations + ******************************************/ + +static u32 __pos_inc(u32 pos, size_t limit) +{ + return ++pos == limit ? 0 : pos; +} + +static int rocker_desc_err(const struct rocker_desc_info *desc_info) +{ + int err = desc_info->desc->comp_err & ~ROCKER_DMA_DESC_COMP_ERR_GEN; + + switch (err) { + case ROCKER_OK: + return 0; + case -ROCKER_ENOENT: + return -ENOENT; + case -ROCKER_ENXIO: + return -ENXIO; + case -ROCKER_ENOMEM: + return -ENOMEM; + case -ROCKER_EEXIST: + return -EEXIST; + case -ROCKER_EINVAL: + return -EINVAL; + case -ROCKER_EMSGSIZE: + return -EMSGSIZE; + case -ROCKER_ENOTSUP: + return -EOPNOTSUPP; + case -ROCKER_ENOBUFS: + return -ENOBUFS; + } + + return -EINVAL; +} + +static void rocker_desc_gen_clear(const struct rocker_desc_info *desc_info) +{ + desc_info->desc->comp_err &= ~ROCKER_DMA_DESC_COMP_ERR_GEN; +} + +static bool rocker_desc_gen(const struct rocker_desc_info *desc_info) +{ + u32 comp_err = desc_info->desc->comp_err; + + return comp_err & ROCKER_DMA_DESC_COMP_ERR_GEN ? true : false; +} + +static void * +rocker_desc_cookie_ptr_get(const struct rocker_desc_info *desc_info) +{ + return (void *)(uintptr_t)desc_info->desc->cookie; +} + +static void rocker_desc_cookie_ptr_set(const struct rocker_desc_info *desc_info, + void *ptr) +{ + desc_info->desc->cookie = (uintptr_t) ptr; +} + +static struct rocker_desc_info * +rocker_desc_head_get(const struct rocker_dma_ring_info *info) +{ + static struct rocker_desc_info *desc_info; + u32 head = __pos_inc(info->head, info->size); + + desc_info = &info->desc_info[info->head]; + if (head == info->tail) + return NULL; /* ring full */ + desc_info->tlv_size = 0; + return desc_info; +} + +static void rocker_desc_commit(const struct rocker_desc_info *desc_info) +{ + desc_info->desc->buf_size = desc_info->data_size; + desc_info->desc->tlv_size = desc_info->tlv_size; +} + +static void rocker_desc_head_set(const struct rocker *rocker, + struct rocker_dma_ring_info *info, + const struct rocker_desc_info *desc_info) +{ + u32 head = __pos_inc(info->head, info->size); + + BUG_ON(head == info->tail); + rocker_desc_commit(desc_info); + info->head = head; + rocker_write32(rocker, DMA_DESC_HEAD(info->type), head); +} + +static struct rocker_desc_info * +rocker_desc_tail_get(struct rocker_dma_ring_info *info) +{ + static struct rocker_desc_info *desc_info; + + if (info->tail == info->head) + return NULL; /* nothing to be done between head and tail */ + desc_info = &info->desc_info[info->tail]; + if (!rocker_desc_gen(desc_info)) + return NULL; /* gen bit not set, desc is not ready yet */ + info->tail = __pos_inc(info->tail, info->size); + desc_info->tlv_size = desc_info->desc->tlv_size; + return desc_info; +} + +static void rocker_dma_ring_credits_set(const struct rocker *rocker, + const struct rocker_dma_ring_info *info, + u32 credits) +{ + if (credits) + rocker_write32(rocker, DMA_DESC_CREDITS(info->type), credits); +} + +static unsigned long rocker_dma_ring_size_fix(size_t size) +{ + return max(ROCKER_DMA_SIZE_MIN, + min(roundup_pow_of_two(size), ROCKER_DMA_SIZE_MAX)); +} + +static int rocker_dma_ring_create(const struct rocker *rocker, + unsigned int type, + size_t size, + struct rocker_dma_ring_info *info) +{ + int i; + + BUG_ON(size != rocker_dma_ring_size_fix(size)); + info->size = size; + info->type = type; 
+ info->head = 0; + info->tail = 0; + info->desc_info = kcalloc(info->size, sizeof(*info->desc_info), + GFP_KERNEL); + if (!info->desc_info) + return -ENOMEM; + + info->desc = pci_alloc_consistent(rocker->pdev, + info->size * sizeof(*info->desc), + &info->mapaddr); + if (!info->desc) { + kfree(info->desc_info); + return -ENOMEM; + } + + for (i = 0; i < info->size; i++) + info->desc_info[i].desc = &info->desc[i]; + + rocker_write32(rocker, DMA_DESC_CTRL(info->type), + ROCKER_DMA_DESC_CTRL_RESET); + rocker_write64(rocker, DMA_DESC_ADDR(info->type), info->mapaddr); + rocker_write32(rocker, DMA_DESC_SIZE(info->type), info->size); + + return 0; +} + +static void rocker_dma_ring_destroy(const struct rocker *rocker, + const struct rocker_dma_ring_info *info) +{ + rocker_write64(rocker, DMA_DESC_ADDR(info->type), 0); + + pci_free_consistent(rocker->pdev, + info->size * sizeof(struct rocker_desc), + info->desc, info->mapaddr); + kfree(info->desc_info); +} + +static void rocker_dma_ring_pass_to_producer(const struct rocker *rocker, + struct rocker_dma_ring_info *info) +{ + int i; + + BUG_ON(info->head || info->tail); + + /* When ring is consumer, we need to advance head for each desc. + * That tells hw that the desc is ready to be used by it. + */ + for (i = 0; i < info->size - 1; i++) + rocker_desc_head_set(rocker, info, &info->desc_info[i]); + rocker_desc_commit(&info->desc_info[i]); +} + +static int rocker_dma_ring_bufs_alloc(const struct rocker *rocker, + const struct rocker_dma_ring_info *info, + int direction, size_t buf_size) +{ + struct pci_dev *pdev = rocker->pdev; + int i; + int err; + + for (i = 0; i < info->size; i++) { + struct rocker_desc_info *desc_info = &info->desc_info[i]; + struct rocker_desc *desc = &info->desc[i]; + dma_addr_t dma_handle; + char *buf; + + buf = kzalloc(buf_size, GFP_KERNEL | GFP_DMA); + if (!buf) { + err = -ENOMEM; + goto rollback; + } + + dma_handle = pci_map_single(pdev, buf, buf_size, direction); + if (pci_dma_mapping_error(pdev, dma_handle)) { + kfree(buf); + err = -EIO; + goto rollback; + } + + desc_info->data = buf; + desc_info->data_size = buf_size; + dma_unmap_addr_set(desc_info, mapaddr, dma_handle); + + desc->buf_addr = dma_handle; + desc->buf_size = buf_size; + } + return 0; + +rollback: + for (i--; i >= 0; i--) { + const struct rocker_desc_info *desc_info = &info->desc_info[i]; + + pci_unmap_single(pdev, dma_unmap_addr(desc_info, mapaddr), + desc_info->data_size, direction); + kfree(desc_info->data); + } + return err; +} + +static void rocker_dma_ring_bufs_free(const struct rocker *rocker, + const struct rocker_dma_ring_info *info, + int direction) +{ + struct pci_dev *pdev = rocker->pdev; + int i; + + for (i = 0; i < info->size; i++) { + const struct rocker_desc_info *desc_info = &info->desc_info[i]; + struct rocker_desc *desc = &info->desc[i]; + + desc->buf_addr = 0; + desc->buf_size = 0; + pci_unmap_single(pdev, dma_unmap_addr(desc_info, mapaddr), + desc_info->data_size, direction); + kfree(desc_info->data); + } +} + +static int rocker_dma_cmd_ring_wait_alloc(struct rocker_desc_info *desc_info) +{ + struct rocker_wait *wait; + + wait = rocker_wait_create(); + if (!wait) + return -ENOMEM; + rocker_desc_cookie_ptr_set(desc_info, wait); + return 0; +} + +static void +rocker_dma_cmd_ring_wait_free(const struct rocker_desc_info *desc_info) +{ + struct rocker_wait *wait = rocker_desc_cookie_ptr_get(desc_info); + + rocker_wait_destroy(wait); +} + +static int rocker_dma_cmd_ring_waits_alloc(const struct rocker *rocker) +{ + const struct 
rocker_dma_ring_info *cmd_ring = &rocker->cmd_ring; + int i; + int err; + + for (i = 0; i < cmd_ring->size; i++) { + err = rocker_dma_cmd_ring_wait_alloc(&cmd_ring->desc_info[i]); + if (err) + goto rollback; + } + return 0; + +rollback: + for (i--; i >= 0; i--) + rocker_dma_cmd_ring_wait_free(&cmd_ring->desc_info[i]); + return err; +} + +static void rocker_dma_cmd_ring_waits_free(const struct rocker *rocker) +{ + const struct rocker_dma_ring_info *cmd_ring = &rocker->cmd_ring; + int i; + + for (i = 0; i < cmd_ring->size; i++) + rocker_dma_cmd_ring_wait_free(&cmd_ring->desc_info[i]); +} + +static int rocker_dma_rings_init(struct rocker *rocker) +{ + const struct pci_dev *pdev = rocker->pdev; + int err; + + err = rocker_dma_ring_create(rocker, ROCKER_DMA_CMD, + ROCKER_DMA_CMD_DEFAULT_SIZE, + &rocker->cmd_ring); + if (err) { + dev_err(&pdev->dev, "failed to create command dma ring\n"); + return err; + } + + spin_lock_init(&rocker->cmd_ring_lock); + + err = rocker_dma_ring_bufs_alloc(rocker, &rocker->cmd_ring, + PCI_DMA_BIDIRECTIONAL, PAGE_SIZE); + if (err) { + dev_err(&pdev->dev, "failed to alloc command dma ring buffers\n"); + goto err_dma_cmd_ring_bufs_alloc; + } + + err = rocker_dma_cmd_ring_waits_alloc(rocker); + if (err) { + dev_err(&pdev->dev, "failed to alloc command dma ring waits\n"); + goto err_dma_cmd_ring_waits_alloc; + } + + err = rocker_dma_ring_create(rocker, ROCKER_DMA_EVENT, + ROCKER_DMA_EVENT_DEFAULT_SIZE, + &rocker->event_ring); + if (err) { + dev_err(&pdev->dev, "failed to create event dma ring\n"); + goto err_dma_event_ring_create; + } + + err = rocker_dma_ring_bufs_alloc(rocker, &rocker->event_ring, + PCI_DMA_FROMDEVICE, PAGE_SIZE); + if (err) { + dev_err(&pdev->dev, "failed to alloc event dma ring buffers\n"); + goto err_dma_event_ring_bufs_alloc; + } + rocker_dma_ring_pass_to_producer(rocker, &rocker->event_ring); + return 0; + +err_dma_event_ring_bufs_alloc: + rocker_dma_ring_destroy(rocker, &rocker->event_ring); +err_dma_event_ring_create: + rocker_dma_cmd_ring_waits_free(rocker); +err_dma_cmd_ring_waits_alloc: + rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring, + PCI_DMA_BIDIRECTIONAL); +err_dma_cmd_ring_bufs_alloc: + rocker_dma_ring_destroy(rocker, &rocker->cmd_ring); + return err; +} + +static void rocker_dma_rings_fini(struct rocker *rocker) +{ + rocker_dma_ring_bufs_free(rocker, &rocker->event_ring, + PCI_DMA_BIDIRECTIONAL); + rocker_dma_ring_destroy(rocker, &rocker->event_ring); + rocker_dma_cmd_ring_waits_free(rocker); + rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring, + PCI_DMA_BIDIRECTIONAL); + rocker_dma_ring_destroy(rocker, &rocker->cmd_ring); +} + +static int rocker_dma_rx_ring_skb_map(const struct rocker_port *rocker_port, + struct rocker_desc_info *desc_info, + struct sk_buff *skb, size_t buf_len) +{ + const struct rocker *rocker = rocker_port->rocker; + struct pci_dev *pdev = rocker->pdev; + dma_addr_t dma_handle; + + dma_handle = pci_map_single(pdev, skb->data, buf_len, + PCI_DMA_FROMDEVICE); + if (pci_dma_mapping_error(pdev, dma_handle)) + return -EIO; + if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_RX_FRAG_ADDR, dma_handle)) + goto tlv_put_failure; + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_RX_FRAG_MAX_LEN, buf_len)) + goto tlv_put_failure; + return 0; + +tlv_put_failure: + pci_unmap_single(pdev, dma_handle, buf_len, PCI_DMA_FROMDEVICE); + desc_info->tlv_size = 0; + return -EMSGSIZE; +} + +static size_t rocker_port_rx_buf_len(const struct rocker_port *rocker_port) +{ + return rocker_port->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; +} + 
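The ring helpers above follow the usual single-producer/single-consumer discipline: __pos_inc() wraps an index at the ring size, rocker_desc_head_get() declares the ring full when advancing head would land on tail (so one slot is always sacrificed to tell full from empty), and rocker_desc_tail_get() consumes an entry only once its GEN bit shows the hardware is done with it; the consumed count then goes back to the device as credits via rocker_dma_ring_credits_set(). A stand-alone sketch of just the index arithmetic, hypothetical code rather than driver API:

/* User-space model of the ring-index rules used by the rocker DMA
 * rings; RING_SIZE is a power of two, as rocker_dma_ring_size_fix()
 * guarantees for the real rings.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define RING_SIZE 8u

static uint32_t pos_inc(uint32_t pos, uint32_t limit)
{
	return ++pos == limit ? 0 : pos;	/* same wrap as __pos_inc() */
}

static bool ring_full(uint32_t head, uint32_t tail)
{
	return pos_inc(head, RING_SIZE) == tail; /* rocker_desc_head_get() test */
}

static bool ring_empty(uint32_t head, uint32_t tail)
{
	return head == tail;			/* rocker_desc_tail_get() test */
}

int main(void)
{
	uint32_t head = 0, tail = 0, produced = 0;

	while (!ring_full(head, tail)) {	/* produce until full */
		head = pos_inc(head, RING_SIZE);
		produced++;
	}
	assert(produced == RING_SIZE - 1);	/* one slot stays unused */

	while (!ring_empty(head, tail))		/* consume everything back */
		tail = pos_inc(tail, RING_SIZE);
	assert(!ring_full(head, tail));
	return 0;
}

The same arithmetic serves both directions: for rings the driver produces into (cmd, tx) it advances head and the device moves tail, while for rings the device fills (event, rx) the driver primes every slot up front with rocker_dma_ring_pass_to_producer() and then reaps completed descriptors from tail.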
+static int rocker_dma_rx_ring_skb_alloc(const struct rocker_port *rocker_port, + struct rocker_desc_info *desc_info) +{ + struct net_device *dev = rocker_port->dev; + struct sk_buff *skb; + size_t buf_len = rocker_port_rx_buf_len(rocker_port); + int err; + + /* Ensure that hw will see tlv_size zero in case of an error. + * That tells hw to use another descriptor. + */ + rocker_desc_cookie_ptr_set(desc_info, NULL); + desc_info->tlv_size = 0; + + skb = netdev_alloc_skb_ip_align(dev, buf_len); + if (!skb) + return -ENOMEM; + err = rocker_dma_rx_ring_skb_map(rocker_port, desc_info, skb, buf_len); + if (err) { + dev_kfree_skb_any(skb); + return err; + } + rocker_desc_cookie_ptr_set(desc_info, skb); + return 0; +} + +static void rocker_dma_rx_ring_skb_unmap(const struct rocker *rocker, + const struct rocker_tlv **attrs) +{ + struct pci_dev *pdev = rocker->pdev; + dma_addr_t dma_handle; + size_t len; + + if (!attrs[ROCKER_TLV_RX_FRAG_ADDR] || + !attrs[ROCKER_TLV_RX_FRAG_MAX_LEN]) + return; + dma_handle = rocker_tlv_get_u64(attrs[ROCKER_TLV_RX_FRAG_ADDR]); + len = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FRAG_MAX_LEN]); + pci_unmap_single(pdev, dma_handle, len, PCI_DMA_FROMDEVICE); +} + +static void rocker_dma_rx_ring_skb_free(const struct rocker *rocker, + const struct rocker_desc_info *desc_info) +{ + const struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1]; + struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info); + + if (!skb) + return; + rocker_tlv_parse_desc(attrs, ROCKER_TLV_RX_MAX, desc_info); + rocker_dma_rx_ring_skb_unmap(rocker, attrs); + dev_kfree_skb_any(skb); +} + +static int rocker_dma_rx_ring_skbs_alloc(const struct rocker_port *rocker_port) +{ + const struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring; + const struct rocker *rocker = rocker_port->rocker; + int i; + int err; + + for (i = 0; i < rx_ring->size; i++) { + err = rocker_dma_rx_ring_skb_alloc(rocker_port, + &rx_ring->desc_info[i]); + if (err) + goto rollback; + } + return 0; + +rollback: + for (i--; i >= 0; i--) + rocker_dma_rx_ring_skb_free(rocker, &rx_ring->desc_info[i]); + return err; +} + +static void rocker_dma_rx_ring_skbs_free(const struct rocker_port *rocker_port) +{ + const struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring; + const struct rocker *rocker = rocker_port->rocker; + int i; + + for (i = 0; i < rx_ring->size; i++) + rocker_dma_rx_ring_skb_free(rocker, &rx_ring->desc_info[i]); +} + +static int rocker_port_dma_rings_init(struct rocker_port *rocker_port) +{ + struct rocker *rocker = rocker_port->rocker; + int err; + + err = rocker_dma_ring_create(rocker, + ROCKER_DMA_TX(rocker_port->port_number), + ROCKER_DMA_TX_DEFAULT_SIZE, + &rocker_port->tx_ring); + if (err) { + netdev_err(rocker_port->dev, "failed to create tx dma ring\n"); + return err; + } + + err = rocker_dma_ring_bufs_alloc(rocker, &rocker_port->tx_ring, + PCI_DMA_TODEVICE, + ROCKER_DMA_TX_DESC_SIZE); + if (err) { + netdev_err(rocker_port->dev, "failed to alloc tx dma ring buffers\n"); + goto err_dma_tx_ring_bufs_alloc; + } + + err = rocker_dma_ring_create(rocker, + ROCKER_DMA_RX(rocker_port->port_number), + ROCKER_DMA_RX_DEFAULT_SIZE, + &rocker_port->rx_ring); + if (err) { + netdev_err(rocker_port->dev, "failed to create rx dma ring\n"); + goto err_dma_rx_ring_create; + } + + err = rocker_dma_ring_bufs_alloc(rocker, &rocker_port->rx_ring, + PCI_DMA_BIDIRECTIONAL, + ROCKER_DMA_RX_DESC_SIZE); + if (err) { + netdev_err(rocker_port->dev, "failed to alloc rx dma ring buffers\n"); + goto err_dma_rx_ring_bufs_alloc; + } + + err = 
rocker_dma_rx_ring_skbs_alloc(rocker_port); + if (err) { + netdev_err(rocker_port->dev, "failed to alloc rx dma ring skbs\n"); + goto err_dma_rx_ring_skbs_alloc; + } + rocker_dma_ring_pass_to_producer(rocker, &rocker_port->rx_ring); + + return 0; + +err_dma_rx_ring_skbs_alloc: + rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring, + PCI_DMA_BIDIRECTIONAL); +err_dma_rx_ring_bufs_alloc: + rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring); +err_dma_rx_ring_create: + rocker_dma_ring_bufs_free(rocker, &rocker_port->tx_ring, + PCI_DMA_TODEVICE); +err_dma_tx_ring_bufs_alloc: + rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring); + return err; +} + +static void rocker_port_dma_rings_fini(struct rocker_port *rocker_port) +{ + struct rocker *rocker = rocker_port->rocker; + + rocker_dma_rx_ring_skbs_free(rocker_port); + rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring, + PCI_DMA_BIDIRECTIONAL); + rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring); + rocker_dma_ring_bufs_free(rocker, &rocker_port->tx_ring, + PCI_DMA_TODEVICE); + rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring); +} + +static void rocker_port_set_enable(const struct rocker_port *rocker_port, + bool enable) +{ + u64 val = rocker_read64(rocker_port->rocker, PORT_PHYS_ENABLE); + + if (enable) + val |= 1ULL << rocker_port->pport; + else + val &= ~(1ULL << rocker_port->pport); + rocker_write64(rocker_port->rocker, PORT_PHYS_ENABLE, val); +} + +/******************************** + * Interrupt handler and helpers + ********************************/ + +static irqreturn_t rocker_cmd_irq_handler(int irq, void *dev_id) +{ + struct rocker *rocker = dev_id; + const struct rocker_desc_info *desc_info; + struct rocker_wait *wait; + u32 credits = 0; + + spin_lock(&rocker->cmd_ring_lock); + while ((desc_info = rocker_desc_tail_get(&rocker->cmd_ring))) { + wait = rocker_desc_cookie_ptr_get(desc_info); + if (wait->nowait) { + rocker_desc_gen_clear(desc_info); + } else { + rocker_wait_wake_up(wait); + } + credits++; + } + spin_unlock(&rocker->cmd_ring_lock); + rocker_dma_ring_credits_set(rocker, &rocker->cmd_ring, credits); + + return IRQ_HANDLED; +} + +static void rocker_port_link_up(const struct rocker_port *rocker_port) +{ + netif_carrier_on(rocker_port->dev); + netdev_info(rocker_port->dev, "Link is up\n"); +} + +static void rocker_port_link_down(const struct rocker_port *rocker_port) +{ + netif_carrier_off(rocker_port->dev); + netdev_info(rocker_port->dev, "Link is down\n"); +} + +static int rocker_event_link_change(const struct rocker *rocker, + const struct rocker_tlv *info) +{ + const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_LINK_CHANGED_MAX + 1]; + unsigned int port_number; + bool link_up; + struct rocker_port *rocker_port; + + rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_LINK_CHANGED_MAX, info); + if (!attrs[ROCKER_TLV_EVENT_LINK_CHANGED_PPORT] || + !attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP]) + return -EIO; + port_number = + rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_PPORT]) - 1; + link_up = rocker_tlv_get_u8(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP]); + + if (port_number >= rocker->port_count) + return -EINVAL; + + rocker_port = rocker->ports[port_number]; + if (netif_carrier_ok(rocker_port->dev) != link_up) { + if (link_up) + rocker_port_link_up(rocker_port); + else + rocker_port_link_down(rocker_port); + } + + return 0; +} + +static int rocker_world_port_ev_mac_vlan_seen(struct rocker_port *rocker_port, + const unsigned char *addr, + __be16 vlan_id); + +static int rocker_event_mac_vlan_seen(const 
struct rocker *rocker, + const struct rocker_tlv *info) +{ + const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAX + 1]; + unsigned int port_number; + struct rocker_port *rocker_port; + const unsigned char *addr; + __be16 vlan_id; + + rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_MAC_VLAN_MAX, info); + if (!attrs[ROCKER_TLV_EVENT_MAC_VLAN_PPORT] || + !attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC] || + !attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID]) + return -EIO; + port_number = + rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_MAC_VLAN_PPORT]) - 1; + addr = rocker_tlv_data(attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC]); + vlan_id = rocker_tlv_get_be16(attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID]); + + if (port_number >= rocker->port_count) + return -EINVAL; + + rocker_port = rocker->ports[port_number]; + return rocker_world_port_ev_mac_vlan_seen(rocker_port, addr, vlan_id); +} + +static int rocker_event_process(const struct rocker *rocker, + const struct rocker_desc_info *desc_info) +{ + const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAX + 1]; + const struct rocker_tlv *info; + u16 type; + + rocker_tlv_parse_desc(attrs, ROCKER_TLV_EVENT_MAX, desc_info); + if (!attrs[ROCKER_TLV_EVENT_TYPE] || + !attrs[ROCKER_TLV_EVENT_INFO]) + return -EIO; + + type = rocker_tlv_get_u16(attrs[ROCKER_TLV_EVENT_TYPE]); + info = attrs[ROCKER_TLV_EVENT_INFO]; + + switch (type) { + case ROCKER_TLV_EVENT_TYPE_LINK_CHANGED: + return rocker_event_link_change(rocker, info); + case ROCKER_TLV_EVENT_TYPE_MAC_VLAN_SEEN: + return rocker_event_mac_vlan_seen(rocker, info); + } + + return -EOPNOTSUPP; +} + +static irqreturn_t rocker_event_irq_handler(int irq, void *dev_id) +{ + struct rocker *rocker = dev_id; + const struct pci_dev *pdev = rocker->pdev; + const struct rocker_desc_info *desc_info; + u32 credits = 0; + int err; + + while ((desc_info = rocker_desc_tail_get(&rocker->event_ring))) { + err = rocker_desc_err(desc_info); + if (err) { + dev_err(&pdev->dev, "event desc received with err %d\n", + err); + } else { + err = rocker_event_process(rocker, desc_info); + if (err) + dev_err(&pdev->dev, "event processing failed with err %d\n", + err); + } + rocker_desc_gen_clear(desc_info); + rocker_desc_head_set(rocker, &rocker->event_ring, desc_info); + credits++; + } + rocker_dma_ring_credits_set(rocker, &rocker->event_ring, credits); + + return IRQ_HANDLED; +} + +static irqreturn_t rocker_tx_irq_handler(int irq, void *dev_id) +{ + struct rocker_port *rocker_port = dev_id; + + napi_schedule(&rocker_port->napi_tx); + return IRQ_HANDLED; +} + +static irqreturn_t rocker_rx_irq_handler(int irq, void *dev_id) +{ + struct rocker_port *rocker_port = dev_id; + + napi_schedule(&rocker_port->napi_rx); + return IRQ_HANDLED; +} + +/******************** + * Command interface + ********************/ + +int rocker_cmd_exec(struct rocker_port *rocker_port, bool nowait, + rocker_cmd_prep_cb_t prepare, void *prepare_priv, + rocker_cmd_proc_cb_t process, void *process_priv) +{ + struct rocker *rocker = rocker_port->rocker; + struct rocker_desc_info *desc_info; + struct rocker_wait *wait; + unsigned long lock_flags; + int err; + + spin_lock_irqsave(&rocker->cmd_ring_lock, lock_flags); + + desc_info = rocker_desc_head_get(&rocker->cmd_ring); + if (!desc_info) { + spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags); + return -EAGAIN; + } + + wait = rocker_desc_cookie_ptr_get(desc_info); + rocker_wait_init(wait); + wait->nowait = nowait; + + err = prepare(rocker_port, desc_info, prepare_priv); + if (err) { + spin_unlock_irqrestore(&rocker->cmd_ring_lock, 
lock_flags); + return err; + } + + rocker_desc_head_set(rocker, &rocker->cmd_ring, desc_info); + + spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags); + + if (nowait) + return 0; + + if (!rocker_wait_event_timeout(wait, HZ / 10)) + return -EIO; + + err = rocker_desc_err(desc_info); + if (err) + return err; + + if (process) + err = process(rocker_port, desc_info, process_priv); + + rocker_desc_gen_clear(desc_info); + return err; +} + +static int +rocker_cmd_get_port_settings_prep(const struct rocker_port *rocker_port, + struct rocker_desc_info *desc_info, + void *priv) +{ + struct rocker_tlv *cmd_info; + + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, + ROCKER_TLV_CMD_TYPE_GET_PORT_SETTINGS)) + return -EMSGSIZE; + cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); + if (!cmd_info) + return -EMSGSIZE; + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT, + rocker_port->pport)) + return -EMSGSIZE; + rocker_tlv_nest_end(desc_info, cmd_info); + return 0; +} + +static int +rocker_cmd_get_port_settings_ethtool_proc(const struct rocker_port *rocker_port, + const struct rocker_desc_info *desc_info, + void *priv) +{ + struct ethtool_cmd *ecmd = priv; + const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1]; + const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1]; + u32 speed; + u8 duplex; + u8 autoneg; + + rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info); + if (!attrs[ROCKER_TLV_CMD_INFO]) + return -EIO; + + rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX, + attrs[ROCKER_TLV_CMD_INFO]); + if (!info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED] || + !info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX] || + !info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG]) + return -EIO; + + speed = rocker_tlv_get_u32(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED]); + duplex = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX]); + autoneg = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG]); + + ecmd->transceiver = XCVR_INTERNAL; + ecmd->supported = SUPPORTED_TP; + ecmd->phy_address = 0xff; + ecmd->port = PORT_TP; + ethtool_cmd_speed_set(ecmd, speed); + ecmd->duplex = duplex ? DUPLEX_FULL : DUPLEX_HALF; + ecmd->autoneg = autoneg ? 
AUTONEG_ENABLE : AUTONEG_DISABLE; + + return 0; +} + +static int +rocker_cmd_get_port_settings_macaddr_proc(const struct rocker_port *rocker_port, + const struct rocker_desc_info *desc_info, + void *priv) +{ + unsigned char *macaddr = priv; + const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1]; + const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1]; + const struct rocker_tlv *attr; + + rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info); + if (!attrs[ROCKER_TLV_CMD_INFO]) + return -EIO; + + rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX, + attrs[ROCKER_TLV_CMD_INFO]); + attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR]; + if (!attr) + return -EIO; + + if (rocker_tlv_len(attr) != ETH_ALEN) + return -EINVAL; + + ether_addr_copy(macaddr, rocker_tlv_data(attr)); + return 0; +} + +static int +rocker_cmd_get_port_settings_mode_proc(const struct rocker_port *rocker_port, + const struct rocker_desc_info *desc_info, + void *priv) +{ + u8 *p_mode = priv; + const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1]; + const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1]; + const struct rocker_tlv *attr; + + rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info); + if (!attrs[ROCKER_TLV_CMD_INFO]) + return -EIO; + + rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX, + attrs[ROCKER_TLV_CMD_INFO]); + attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MODE]; + if (!attr) + return -EIO; + + *p_mode = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MODE]); + return 0; +} + +struct port_name { + char *buf; + size_t len; +}; + +static int +rocker_cmd_get_port_settings_phys_name_proc(const struct rocker_port *rocker_port, + const struct rocker_desc_info *desc_info, + void *priv) +{ + const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1]; + const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1]; + struct port_name *name = priv; + const struct rocker_tlv *attr; + size_t i, j, len; + const char *str; + + rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info); + if (!attrs[ROCKER_TLV_CMD_INFO]) + return -EIO; + + rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX, + attrs[ROCKER_TLV_CMD_INFO]); + attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_PHYS_NAME]; + if (!attr) + return -EIO; + + len = min_t(size_t, rocker_tlv_len(attr), name->len); + str = rocker_tlv_data(attr); + + /* make sure name only contains alphanumeric characters */ + for (i = j = 0; i < len; ++i) { + if (isalnum(str[i])) { + name->buf[j] = str[i]; + j++; + } + } + + if (j == 0) + return -EIO; + + name->buf[j] = '\0'; + + return 0; +} + +static int +rocker_cmd_set_port_settings_ethtool_prep(const struct rocker_port *rocker_port, + struct rocker_desc_info *desc_info, + void *priv) +{ + struct ethtool_cmd *ecmd = priv; + struct rocker_tlv *cmd_info; + + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, + ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS)) + return -EMSGSIZE; + cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); + if (!cmd_info) + return -EMSGSIZE; + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT, + rocker_port->pport)) + return -EMSGSIZE; + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_SPEED, + ethtool_cmd_speed(ecmd))) + return -EMSGSIZE; + if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX, + ecmd->duplex)) + return -EMSGSIZE; + if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG, + ecmd->autoneg)) + 
return -EMSGSIZE; + rocker_tlv_nest_end(desc_info, cmd_info); + return 0; +} + +static int +rocker_cmd_set_port_settings_macaddr_prep(const struct rocker_port *rocker_port, + struct rocker_desc_info *desc_info, + void *priv) +{ + const unsigned char *macaddr = priv; + struct rocker_tlv *cmd_info; + + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, + ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS)) + return -EMSGSIZE; + cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); + if (!cmd_info) + return -EMSGSIZE; + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT, + rocker_port->pport)) + return -EMSGSIZE; + if (rocker_tlv_put(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR, + ETH_ALEN, macaddr)) + return -EMSGSIZE; + rocker_tlv_nest_end(desc_info, cmd_info); + return 0; +} + +static int +rocker_cmd_set_port_settings_mtu_prep(const struct rocker_port *rocker_port, + struct rocker_desc_info *desc_info, + void *priv) +{ + int mtu = *(int *)priv; + struct rocker_tlv *cmd_info; + + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, + ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS)) + return -EMSGSIZE; + cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); + if (!cmd_info) + return -EMSGSIZE; + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT, + rocker_port->pport)) + return -EMSGSIZE; + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_MTU, + mtu)) + return -EMSGSIZE; + rocker_tlv_nest_end(desc_info, cmd_info); + return 0; +} + +static int +rocker_cmd_set_port_learning_prep(const struct rocker_port *rocker_port, + struct rocker_desc_info *desc_info, + void *priv) +{ + bool learning = *(bool *)priv; + struct rocker_tlv *cmd_info; + + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, + ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS)) + return -EMSGSIZE; + cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); + if (!cmd_info) + return -EMSGSIZE; + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT, + rocker_port->pport)) + return -EMSGSIZE; + if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_LEARNING, + learning)) + return -EMSGSIZE; + rocker_tlv_nest_end(desc_info, cmd_info); + return 0; +} + +static int rocker_cmd_get_port_settings_ethtool(struct rocker_port *rocker_port, + struct ethtool_cmd *ecmd) +{ + return rocker_cmd_exec(rocker_port, false, + rocker_cmd_get_port_settings_prep, NULL, + rocker_cmd_get_port_settings_ethtool_proc, + ecmd); +} + +static int rocker_cmd_get_port_settings_macaddr(struct rocker_port *rocker_port, + unsigned char *macaddr) +{ + return rocker_cmd_exec(rocker_port, false, + rocker_cmd_get_port_settings_prep, NULL, + rocker_cmd_get_port_settings_macaddr_proc, + macaddr); +} + +static int rocker_cmd_get_port_settings_mode(struct rocker_port *rocker_port, + u8 *p_mode) +{ + return rocker_cmd_exec(rocker_port, false, + rocker_cmd_get_port_settings_prep, NULL, + rocker_cmd_get_port_settings_mode_proc, p_mode); +} + +static int rocker_cmd_set_port_settings_ethtool(struct rocker_port *rocker_port, + struct ethtool_cmd *ecmd) +{ + return rocker_cmd_exec(rocker_port, false, + rocker_cmd_set_port_settings_ethtool_prep, + ecmd, NULL, NULL); +} + +static int rocker_cmd_set_port_settings_macaddr(struct rocker_port *rocker_port, + unsigned char *macaddr) +{ + return rocker_cmd_exec(rocker_port, false, + rocker_cmd_set_port_settings_macaddr_prep, + macaddr, NULL, NULL); +} + +static int rocker_cmd_set_port_settings_mtu(struct rocker_port *rocker_port, + int mtu) +{ + return 
rocker_cmd_exec(rocker_port, false, + rocker_cmd_set_port_settings_mtu_prep, + &mtu, NULL, NULL); +} + +int rocker_port_set_learning(struct rocker_port *rocker_port, + bool learning) +{ + return rocker_cmd_exec(rocker_port, false, + rocker_cmd_set_port_learning_prep, + &learning, NULL, NULL); +} + +/********************** + * Worlds manipulation + **********************/ + +static struct rocker_world_ops *rocker_world_ops[] = { + &rocker_ofdpa_ops, +}; + +#define ROCKER_WORLD_OPS_LEN ARRAY_SIZE(rocker_world_ops) + +static struct rocker_world_ops *rocker_world_ops_find(u8 mode) +{ + int i; + + for (i = 0; i < ROCKER_WORLD_OPS_LEN; i++) + if (rocker_world_ops[i]->mode == mode) + return rocker_world_ops[i]; + return NULL; +} + +static int rocker_world_init(struct rocker *rocker, u8 mode) +{ + struct rocker_world_ops *wops; + int err; + + wops = rocker_world_ops_find(mode); + if (!wops) { + dev_err(&rocker->pdev->dev, "port mode \"%d\" is not supported\n", + mode); + return -EINVAL; + } + rocker->wops = wops; + rocker->wpriv = kzalloc(wops->priv_size, GFP_KERNEL); + if (!rocker->wpriv) + return -ENOMEM; + if (!wops->init) + return 0; + err = wops->init(rocker); + if (err) + kfree(rocker->wpriv); + return err; +} + +static void rocker_world_fini(struct rocker *rocker) +{ + struct rocker_world_ops *wops = rocker->wops; + + if (!wops || !wops->fini) + return; + wops->fini(rocker); + kfree(rocker->wpriv); +} + +static int rocker_world_check_init(struct rocker_port *rocker_port) +{ + struct rocker *rocker = rocker_port->rocker; + u8 mode; + int err; + + err = rocker_cmd_get_port_settings_mode(rocker_port, &mode); + if (err) { + dev_err(&rocker->pdev->dev, "failed to get port mode\n"); + return err; + } + if (rocker->wops) { + if (rocker->wops->mode != mode) { + dev_err(&rocker->pdev->dev, "hardware has ports in different worlds, which is not supported\n"); + return -EINVAL; + } + return 0; + } + return rocker_world_init(rocker, mode); +} + +static int rocker_world_port_pre_init(struct rocker_port *rocker_port) +{ + struct rocker_world_ops *wops = rocker_port->rocker->wops; + int err; + + rocker_port->wpriv = kzalloc(wops->port_priv_size, GFP_KERNEL); + if (!rocker_port->wpriv) + return -ENOMEM; + if (!wops->port_pre_init) + return 0; + err = wops->port_pre_init(rocker_port); + if (err) + kfree(rocker_port->wpriv); + return err; +} + +static int rocker_world_port_init(struct rocker_port *rocker_port) +{ + struct rocker_world_ops *wops = rocker_port->rocker->wops; + + if (!wops->port_init) + return 0; + return wops->port_init(rocker_port); +} + +static void rocker_world_port_fini(struct rocker_port *rocker_port) +{ + struct rocker_world_ops *wops = rocker_port->rocker->wops; + + if (!wops->port_fini) + return; + wops->port_fini(rocker_port); +} + +static void rocker_world_port_post_fini(struct rocker_port *rocker_port) +{ + struct rocker_world_ops *wops = rocker_port->rocker->wops; + + if (!wops->port_post_fini) + return; + wops->port_post_fini(rocker_port); + kfree(rocker_port->wpriv); +} + +static int rocker_world_port_open(struct rocker_port *rocker_port) +{ + struct rocker_world_ops *wops = rocker_port->rocker->wops; + + if (!wops->port_open) + return 0; + return wops->port_open(rocker_port); +} + +static void rocker_world_port_stop(struct rocker_port *rocker_port) +{ + struct rocker_world_ops *wops = rocker_port->rocker->wops; + + if (!wops->port_stop) + return; + wops->port_stop(rocker_port); +} + 
+static int rocker_world_port_attr_stp_state_set(struct rocker_port *rocker_port, + u8 state, + struct switchdev_trans *trans) +{ + struct rocker_world_ops *wops = rocker_port->rocker->wops; + + if (!wops->port_attr_stp_state_set) + return -EOPNOTSUPP; + return wops->port_attr_stp_state_set(rocker_port, state, trans); +} + +static int +rocker_world_port_attr_bridge_flags_set(struct rocker_port *rocker_port, + unsigned long brport_flags, + struct switchdev_trans *trans) +{ + struct rocker_world_ops *wops = rocker_port->rocker->wops; + + if (!wops->port_attr_bridge_flags_set) + return -EOPNOTSUPP; + return wops->port_attr_bridge_flags_set(rocker_port, brport_flags, + trans); +} + +static int +rocker_world_port_attr_bridge_flags_get(const struct rocker_port *rocker_port, + unsigned long *p_brport_flags) +{ + struct rocker_world_ops *wops = rocker_port->rocker->wops; + + if (!wops->port_attr_bridge_flags_get) + return -EOPNOTSUPP; + return wops->port_attr_bridge_flags_get(rocker_port, p_brport_flags); +} + +static int +rocker_world_port_attr_bridge_ageing_time_set(struct rocker_port *rocker_port, + u32 ageing_time, + struct switchdev_trans *trans) +{ + struct rocker_world_ops *wops = rocker_port->rocker->wops; + + if (!wops->port_attr_bridge_ageing_time_set) + return -EOPNOTSUPP; + return wops->port_attr_bridge_ageing_time_set(rocker_port, ageing_time, + trans); +} + +static int +rocker_world_port_obj_vlan_add(struct rocker_port *rocker_port, + const struct switchdev_obj_port_vlan *vlan, + struct switchdev_trans *trans) +{ + struct rocker_world_ops *wops = rocker_port->rocker->wops; + + if (!wops->port_obj_vlan_add) + return -EOPNOTSUPP; + return wops->port_obj_vlan_add(rocker_port, vlan, trans); +} + +static int +rocker_world_port_obj_vlan_del(struct rocker_port *rocker_port, + const struct switchdev_obj_port_vlan *vlan) +{ + struct rocker_world_ops *wops = rocker_port->rocker->wops; + + if (!wops->port_obj_vlan_del) + return -EOPNOTSUPP; + return wops->port_obj_vlan_del(rocker_port, vlan); +} + +static int +rocker_world_port_obj_vlan_dump(const struct rocker_port *rocker_port, + struct switchdev_obj_port_vlan *vlan, + switchdev_obj_dump_cb_t *cb) +{ + struct rocker_world_ops *wops = rocker_port->rocker->wops; + + if (!wops->port_obj_vlan_dump) + return -EOPNOTSUPP; + return wops->port_obj_vlan_dump(rocker_port, vlan, cb); +} + +static int +rocker_world_port_obj_fib4_add(struct rocker_port *rocker_port, + const struct switchdev_obj_ipv4_fib *fib4, + struct switchdev_trans *trans) +{ + struct rocker_world_ops *wops = rocker_port->rocker->wops; + + if (!wops->port_obj_fib4_add) + return -EOPNOTSUPP; + return wops->port_obj_fib4_add(rocker_port, fib4, trans); +} + +static int +rocker_world_port_obj_fib4_del(struct rocker_port *rocker_port, + const struct switchdev_obj_ipv4_fib *fib4) +{ + struct rocker_world_ops *wops = rocker_port->rocker->wops; + + if (!wops->port_obj_fib4_del) + return -EOPNOTSUPP; + return wops->port_obj_fib4_del(rocker_port, fib4); +} + +static int +rocker_world_port_obj_fdb_add(struct rocker_port *rocker_port, + const struct switchdev_obj_port_fdb *fdb, + struct switchdev_trans *trans) +{ + struct rocker_world_ops *wops = rocker_port->rocker->wops; + + if (!wops->port_obj_fdb_add) + return -EOPNOTSUPP; + return wops->port_obj_fdb_add(rocker_port, fdb, trans); +} + +static int +rocker_world_port_obj_fdb_del(struct rocker_port *rocker_port, + const struct switchdev_obj_port_fdb *fdb) +{ + struct rocker_world_ops *wops = rocker_port->rocker->wops; + + if (!wops->port_obj_fdb_del) + return -EOPNOTSUPP; + return wops->port_obj_fdb_del(rocker_port, fdb); +} + 
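These thin wrappers all follow one pattern: NULL-check a callback, then delegate. A world back-end plugs in by filling a struct rocker_world_ops that rocker_world_ops_find() matches against the port mode reported by the hardware. As a sketch of the table's shape: the field names below are exactly the ones the wrappers in this file exercise, while the ofdpa_* callback names and the ROCKER_PORT_MODE_OF_DPA constant are assumptions for illustration (the real rocker_ofdpa_ops table is defined in rocker_ofdpa.c, outside this excerpt):

/* sketch only: callback names and the mode constant are assumed, not
 * taken from this patch
 */
struct rocker_world_ops rocker_ofdpa_ops = {
	.mode			= ROCKER_PORT_MODE_OF_DPA,   /* assumed constant */
	.priv_size		= sizeof(struct ofdpa),       /* per-switch state */
	.port_priv_size		= sizeof(struct ofdpa_port),  /* per-port state */
	.init			= ofdpa_init,                 /* hypothetical names */
	.fini			= ofdpa_fini,
	.port_pre_init		= ofdpa_port_pre_init,
	.port_open		= ofdpa_port_open,
	.port_stop		= ofdpa_port_stop,
	.port_obj_vlan_add	= ofdpa_port_obj_vlan_add,
	/* a callback left NULL makes the matching rocker_world_*() wrapper
	 * return 0 (lifecycle ops) or -EOPNOTSUPP (attr/obj ops) instead of
	 * dispatching
	 */
};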
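The command wrappers earlier in the file funnel into rocker_cmd_exec() in the same spirit, so adding a new command is mostly a prepare callback plus a one-line caller. A minimal sketch, assuming a hypothetical ROCKER_TLV_CMD_TYPE_CLEAR_PORT_STATS command type (illustrative only); every helper and existing TLV attribute used here appears elsewhere in this patch:

static int
rocker_cmd_clear_port_stats_prep(const struct rocker_port *rocker_port,
				 struct rocker_desc_info *desc_info,
				 void *priv)
{
	struct rocker_tlv *cmd_info;

	/* top-level TLV selects the command type */
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
			       ROCKER_TLV_CMD_TYPE_CLEAR_PORT_STATS))
		return -EMSGSIZE;
	/* command-specific attributes are nested under CMD_INFO */
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_STATS_PPORT,
			       rocker_port->pport))
		return -EMSGSIZE;
	rocker_tlv_nest_end(desc_info, cmd_info);
	return 0;
}

static int rocker_cmd_clear_port_stats(struct rocker_port *rocker_port)
{
	/* nowait=false: rocker_cmd_exec() claims a cmd ring descriptor,
	 * runs the prepare callback to fill it, posts it and sleeps up to
	 * HZ/10 for the completion interrupt; no process callback is
	 * needed when no reply payload is expected
	 */
	return rocker_cmd_exec(rocker_port, false,
			       rocker_cmd_clear_port_stats_prep, NULL,
			       NULL, NULL);
}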
+static int +rocker_world_port_obj_fdb_dump(const struct rocker_port *rocker_port, + struct switchdev_obj_port_fdb *fdb, + switchdev_obj_dump_cb_t *cb) +{ + struct rocker_world_ops *wops = rocker_port->rocker->wops; + + if (!wops->port_obj_fdb_dump) + return -EOPNOTSUPP; + return wops->port_obj_fdb_dump(rocker_port, fdb, cb); +} + +static int rocker_world_port_master_linked(struct rocker_port *rocker_port, + struct net_device *master) +{ + struct rocker_world_ops *wops = rocker_port->rocker->wops; + + if (!wops->port_master_linked) + return -EOPNOTSUPP; + return wops->port_master_linked(rocker_port, master); +} + +static int rocker_world_port_master_unlinked(struct rocker_port *rocker_port, + struct net_device *master) +{ + struct rocker_world_ops *wops = rocker_port->rocker->wops; + + if (!wops->port_master_unlinked) + return -EOPNOTSUPP; + return wops->port_master_unlinked(rocker_port, master); +} + +static int rocker_world_port_neigh_update(struct rocker_port *rocker_port, + struct neighbour *n) +{ + struct rocker_world_ops *wops = rocker_port->rocker->wops; + + if (!wops->port_neigh_update) + return -EOPNOTSUPP; + return wops->port_neigh_update(rocker_port, n); +} + +static int rocker_world_port_neigh_destroy(struct rocker_port *rocker_port, + struct neighbour *n) +{ + struct rocker_world_ops *wops = rocker_port->rocker->wops; + + if (!wops->port_neigh_destroy) + return -EOPNOTSUPP; + return wops->port_neigh_destroy(rocker_port, n); +} + +static int rocker_world_port_ev_mac_vlan_seen(struct rocker_port *rocker_port, + const unsigned char *addr, + __be16 vlan_id) +{ + struct rocker_world_ops *wops = rocker_port->rocker->wops; + + if (!wops->port_ev_mac_vlan_seen) + return -EOPNOTSUPP; + return wops->port_ev_mac_vlan_seen(rocker_port, addr, vlan_id); +} + +/***************** + * Net device ops + *****************/ + +static int rocker_port_open(struct net_device *dev) +{ + struct rocker_port *rocker_port = netdev_priv(dev); + int err; + + err = rocker_port_dma_rings_init(rocker_port); + if (err) + return err; + + err = request_irq(rocker_msix_tx_vector(rocker_port), + rocker_tx_irq_handler, 0, + rocker_driver_name, rocker_port); + if (err) { + netdev_err(rocker_port->dev, "cannot assign tx irq\n"); + goto err_request_tx_irq; + } + + err = request_irq(rocker_msix_rx_vector(rocker_port), + rocker_rx_irq_handler, 0, + rocker_driver_name, rocker_port); + if (err) { + netdev_err(rocker_port->dev, "cannot assign rx irq\n"); + goto err_request_rx_irq; + } + + err = rocker_world_port_open(rocker_port); + if (err) { + netdev_err(rocker_port->dev, "cannot open port in world\n"); + goto err_world_port_open; + } + + napi_enable(&rocker_port->napi_tx); + napi_enable(&rocker_port->napi_rx); + if (!dev->proto_down) + rocker_port_set_enable(rocker_port, true); + netif_start_queue(dev); + return 0; + +err_world_port_open: + free_irq(rocker_msix_rx_vector(rocker_port), rocker_port); +err_request_rx_irq: + free_irq(rocker_msix_tx_vector(rocker_port), rocker_port); +err_request_tx_irq: + rocker_port_dma_rings_fini(rocker_port); + return err; +} + +static int rocker_port_stop(struct net_device *dev) +{ + struct rocker_port *rocker_port = netdev_priv(dev); + + netif_stop_queue(dev); + rocker_port_set_enable(rocker_port, false); + napi_disable(&rocker_port->napi_rx); + napi_disable(&rocker_port->napi_tx); + rocker_world_port_stop(rocker_port); + free_irq(rocker_msix_rx_vector(rocker_port), rocker_port); + free_irq(rocker_msix_tx_vector(rocker_port), rocker_port); + rocker_port_dma_rings_fini(rocker_port); + + 
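+ /* at this point everything acquired in rocker_port_open() has been released, in reverse order */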
return 0; +} + +static void rocker_tx_desc_frags_unmap(const struct rocker_port *rocker_port, + const struct rocker_desc_info *desc_info) +{ + const struct rocker *rocker = rocker_port->rocker; + struct pci_dev *pdev = rocker->pdev; + const struct rocker_tlv *attrs[ROCKER_TLV_TX_MAX + 1]; + struct rocker_tlv *attr; + int rem; + + rocker_tlv_parse_desc(attrs, ROCKER_TLV_TX_MAX, desc_info); + if (!attrs[ROCKER_TLV_TX_FRAGS]) + return; + rocker_tlv_for_each_nested(attr, attrs[ROCKER_TLV_TX_FRAGS], rem) { + const struct rocker_tlv *frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_MAX + 1]; + dma_addr_t dma_handle; + size_t len; + + if (rocker_tlv_type(attr) != ROCKER_TLV_TX_FRAG) + continue; + rocker_tlv_parse_nested(frag_attrs, ROCKER_TLV_TX_FRAG_ATTR_MAX, + attr); + if (!frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_ADDR] || + !frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_LEN]) + continue; + dma_handle = rocker_tlv_get_u64(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_ADDR]); + len = rocker_tlv_get_u16(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_LEN]); + pci_unmap_single(pdev, dma_handle, len, DMA_TO_DEVICE); + } +} + +static int rocker_tx_desc_frag_map_put(const struct rocker_port *rocker_port, + struct rocker_desc_info *desc_info, + char *buf, size_t buf_len) +{ + const struct rocker *rocker = rocker_port->rocker; + struct pci_dev *pdev = rocker->pdev; + dma_addr_t dma_handle; + struct rocker_tlv *frag; + + dma_handle = pci_map_single(pdev, buf, buf_len, DMA_TO_DEVICE); + if (unlikely(pci_dma_mapping_error(pdev, dma_handle))) { + if (net_ratelimit()) + netdev_err(rocker_port->dev, "failed to dma map tx frag\n"); + return -EIO; + } + frag = rocker_tlv_nest_start(desc_info, ROCKER_TLV_TX_FRAG); + if (!frag) + goto unmap_frag; + if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_TX_FRAG_ATTR_ADDR, + dma_handle)) + goto nest_cancel; + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_TX_FRAG_ATTR_LEN, + buf_len)) + goto nest_cancel; + rocker_tlv_nest_end(desc_info, frag); + return 0; + +nest_cancel: + rocker_tlv_nest_cancel(desc_info, frag); +unmap_frag: + pci_unmap_single(pdev, dma_handle, buf_len, DMA_TO_DEVICE); + return -EMSGSIZE; +} + +static netdev_tx_t rocker_port_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct rocker_port *rocker_port = netdev_priv(dev); + struct rocker *rocker = rocker_port->rocker; + struct rocker_desc_info *desc_info; + struct rocker_tlv *frags; + int i; + int err; + + desc_info = rocker_desc_head_get(&rocker_port->tx_ring); + if (unlikely(!desc_info)) { + if (net_ratelimit()) + netdev_err(dev, "tx ring full when queue awake\n"); + return NETDEV_TX_BUSY; + } + + rocker_desc_cookie_ptr_set(desc_info, skb); + + frags = rocker_tlv_nest_start(desc_info, ROCKER_TLV_TX_FRAGS); + if (!frags) + goto out; + err = rocker_tx_desc_frag_map_put(rocker_port, desc_info, + skb->data, skb_headlen(skb)); + if (err) + goto nest_cancel; + if (skb_shinfo(skb)->nr_frags > ROCKER_TX_FRAGS_MAX) { + err = skb_linearize(skb); + if (err) + goto unmap_frags; + } + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + + err = rocker_tx_desc_frag_map_put(rocker_port, desc_info, + skb_frag_address(frag), + skb_frag_size(frag)); + if (err) + goto unmap_frags; + } + rocker_tlv_nest_end(desc_info, frags); + + rocker_desc_gen_clear(desc_info); + rocker_desc_head_set(rocker, &rocker_port->tx_ring, desc_info); + + desc_info = rocker_desc_head_get(&rocker_port->tx_ring); + if (!desc_info) + netif_stop_queue(dev); + + return NETDEV_TX_OK; + +unmap_frags: + rocker_tx_desc_frags_unmap(rocker_port, 
desc_info); +nest_cancel: + rocker_tlv_nest_cancel(desc_info, frags); +out: + dev_kfree_skb(skb); + dev->stats.tx_dropped++; + + return NETDEV_TX_OK; +} + +static int rocker_port_set_mac_address(struct net_device *dev, void *p) +{ + struct sockaddr *addr = p; + struct rocker_port *rocker_port = netdev_priv(dev); + int err; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + err = rocker_cmd_set_port_settings_macaddr(rocker_port, addr->sa_data); + if (err) + return err; + memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + return 0; +} + +static int rocker_port_change_mtu(struct net_device *dev, int new_mtu) +{ + struct rocker_port *rocker_port = netdev_priv(dev); + int running = netif_running(dev); + int err; + +#define ROCKER_PORT_MIN_MTU 68 +#define ROCKER_PORT_MAX_MTU 9000 + + if (new_mtu < ROCKER_PORT_MIN_MTU || new_mtu > ROCKER_PORT_MAX_MTU) + return -EINVAL; + + if (running) + rocker_port_stop(dev); + + netdev_info(dev, "MTU change from %d to %d\n", dev->mtu, new_mtu); + dev->mtu = new_mtu; + + err = rocker_cmd_set_port_settings_mtu(rocker_port, new_mtu); + if (err) + return err; + + if (running) + err = rocker_port_open(dev); + + return err; +} + +static int rocker_port_get_phys_port_name(struct net_device *dev, + char *buf, size_t len) +{ + struct rocker_port *rocker_port = netdev_priv(dev); + struct port_name name = { .buf = buf, .len = len }; + int err; + + err = rocker_cmd_exec(rocker_port, false, + rocker_cmd_get_port_settings_prep, NULL, + rocker_cmd_get_port_settings_phys_name_proc, + &name); + + return err ? -EOPNOTSUPP : 0; +} + +static int rocker_port_change_proto_down(struct net_device *dev, + bool proto_down) +{ + struct rocker_port *rocker_port = netdev_priv(dev); + + if (rocker_port->dev->flags & IFF_UP) + rocker_port_set_enable(rocker_port, !proto_down); + rocker_port->dev->proto_down = proto_down; + return 0; +} + +static void rocker_port_neigh_destroy(struct neighbour *n) +{ + struct rocker_port *rocker_port = netdev_priv(n->dev); + int err; + + err = rocker_world_port_neigh_destroy(rocker_port, n); + if (err) + netdev_warn(rocker_port->dev, "failed to handle neigh destroy (err %d)\n", + err); +} + +static const struct net_device_ops rocker_port_netdev_ops = { + .ndo_open = rocker_port_open, + .ndo_stop = rocker_port_stop, + .ndo_start_xmit = rocker_port_xmit, + .ndo_set_mac_address = rocker_port_set_mac_address, + .ndo_change_mtu = rocker_port_change_mtu, + .ndo_bridge_getlink = switchdev_port_bridge_getlink, + .ndo_bridge_setlink = switchdev_port_bridge_setlink, + .ndo_bridge_dellink = switchdev_port_bridge_dellink, + .ndo_fdb_add = switchdev_port_fdb_add, + .ndo_fdb_del = switchdev_port_fdb_del, + .ndo_fdb_dump = switchdev_port_fdb_dump, + .ndo_get_phys_port_name = rocker_port_get_phys_port_name, + .ndo_change_proto_down = rocker_port_change_proto_down, + .ndo_neigh_destroy = rocker_port_neigh_destroy, +}; + +/******************** + * swdev interface + ********************/ + +static int rocker_port_attr_get(struct net_device *dev, + struct switchdev_attr *attr) +{ + const struct rocker_port *rocker_port = netdev_priv(dev); + const struct rocker *rocker = rocker_port->rocker; + int err = 0; + + switch (attr->id) { + case SWITCHDEV_ATTR_ID_PORT_PARENT_ID: + attr->u.ppid.id_len = sizeof(rocker->hw.id); + memcpy(&attr->u.ppid.id, &rocker->hw.id, attr->u.ppid.id_len); + break; + case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS: + err = rocker_world_port_attr_bridge_flags_get(rocker_port, + &attr->u.brport_flags); + break; + default: + return 
-EOPNOTSUPP; + } + + return err; +} + +static int rocker_port_attr_set(struct net_device *dev, + const struct switchdev_attr *attr, + struct switchdev_trans *trans) +{ + struct rocker_port *rocker_port = netdev_priv(dev); + int err = 0; + + switch (attr->id) { + case SWITCHDEV_ATTR_ID_PORT_STP_STATE: + err = rocker_world_port_attr_stp_state_set(rocker_port, + attr->u.stp_state, + trans); + break; + case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS: + err = rocker_world_port_attr_bridge_flags_set(rocker_port, + attr->u.brport_flags, + trans); + break; + case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME: + err = rocker_world_port_attr_bridge_ageing_time_set(rocker_port, + attr->u.ageing_time, + trans); + break; + default: + err = -EOPNOTSUPP; + break; + } + + return err; +} + +static int rocker_port_obj_add(struct net_device *dev, + const struct switchdev_obj *obj, + struct switchdev_trans *trans) +{ + struct rocker_port *rocker_port = netdev_priv(dev); + int err = 0; + + switch (obj->id) { + case SWITCHDEV_OBJ_ID_PORT_VLAN: + err = rocker_world_port_obj_vlan_add(rocker_port, + SWITCHDEV_OBJ_PORT_VLAN(obj), + trans); + break; + case SWITCHDEV_OBJ_ID_IPV4_FIB: + err = rocker_world_port_obj_fib4_add(rocker_port, + SWITCHDEV_OBJ_IPV4_FIB(obj), + trans); + break; + case SWITCHDEV_OBJ_ID_PORT_FDB: + err = rocker_world_port_obj_fdb_add(rocker_port, + SWITCHDEV_OBJ_PORT_FDB(obj), + trans); + break; + default: + err = -EOPNOTSUPP; + break; + } + + return err; +} + +static int rocker_port_obj_del(struct net_device *dev, + const struct switchdev_obj *obj) +{ + struct rocker_port *rocker_port = netdev_priv(dev); + int err = 0; + + switch (obj->id) { + case SWITCHDEV_OBJ_ID_PORT_VLAN: + err = rocker_world_port_obj_vlan_del(rocker_port, + SWITCHDEV_OBJ_PORT_VLAN(obj)); + break; + case SWITCHDEV_OBJ_ID_IPV4_FIB: + err = rocker_world_port_obj_fib4_del(rocker_port, + SWITCHDEV_OBJ_IPV4_FIB(obj)); + break; + case SWITCHDEV_OBJ_ID_PORT_FDB: + err = rocker_world_port_obj_fdb_del(rocker_port, + SWITCHDEV_OBJ_PORT_FDB(obj)); + break; + default: + err = -EOPNOTSUPP; + break; + } + + return err; +} + +static int rocker_port_obj_dump(struct net_device *dev, + struct switchdev_obj *obj, + switchdev_obj_dump_cb_t *cb) +{ + const struct rocker_port *rocker_port = netdev_priv(dev); + int err = 0; + + switch (obj->id) { + case SWITCHDEV_OBJ_ID_PORT_FDB: + err = rocker_world_port_obj_fdb_dump(rocker_port, + SWITCHDEV_OBJ_PORT_FDB(obj), + cb); + break; + case SWITCHDEV_OBJ_ID_PORT_VLAN: + err = rocker_world_port_obj_vlan_dump(rocker_port, + SWITCHDEV_OBJ_PORT_VLAN(obj), + cb); + break; + default: + err = -EOPNOTSUPP; + break; + } + + return err; +} + +static const struct switchdev_ops rocker_port_switchdev_ops = { + .switchdev_port_attr_get = rocker_port_attr_get, + .switchdev_port_attr_set = rocker_port_attr_set, + .switchdev_port_obj_add = rocker_port_obj_add, + .switchdev_port_obj_del = rocker_port_obj_del, + .switchdev_port_obj_dump = rocker_port_obj_dump, +}; + +/******************** + * ethtool interface + ********************/ + +static int rocker_port_get_settings(struct net_device *dev, + struct ethtool_cmd *ecmd) +{ + struct rocker_port *rocker_port = netdev_priv(dev); + + return rocker_cmd_get_port_settings_ethtool(rocker_port, ecmd); +} + +static int rocker_port_set_settings(struct net_device *dev, + struct ethtool_cmd *ecmd) +{ + struct rocker_port *rocker_port = netdev_priv(dev); + + return rocker_cmd_set_port_settings_ethtool(rocker_port, ecmd); +} + +static void rocker_port_get_drvinfo(struct net_device *dev, + struct 
ethtool_drvinfo *drvinfo) +{ + strlcpy(drvinfo->driver, rocker_driver_name, sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version)); +} + +static struct rocker_port_stats { + char str[ETH_GSTRING_LEN]; + int type; +} rocker_port_stats[] = { + { "rx_packets", ROCKER_TLV_CMD_PORT_STATS_RX_PKTS, }, + { "rx_bytes", ROCKER_TLV_CMD_PORT_STATS_RX_BYTES, }, + { "rx_dropped", ROCKER_TLV_CMD_PORT_STATS_RX_DROPPED, }, + { "rx_errors", ROCKER_TLV_CMD_PORT_STATS_RX_ERRORS, }, + + { "tx_packets", ROCKER_TLV_CMD_PORT_STATS_TX_PKTS, }, + { "tx_bytes", ROCKER_TLV_CMD_PORT_STATS_TX_BYTES, }, + { "tx_dropped", ROCKER_TLV_CMD_PORT_STATS_TX_DROPPED, }, + { "tx_errors", ROCKER_TLV_CMD_PORT_STATS_TX_ERRORS, }, +}; + +#define ROCKER_PORT_STATS_LEN ARRAY_SIZE(rocker_port_stats) + +static void rocker_port_get_strings(struct net_device *netdev, u32 stringset, + u8 *data) +{ + u8 *p = data; + int i; + + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < ARRAY_SIZE(rocker_port_stats); i++) { + memcpy(p, rocker_port_stats[i].str, ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + break; + } +} + +static int +rocker_cmd_get_port_stats_prep(const struct rocker_port *rocker_port, + struct rocker_desc_info *desc_info, + void *priv) +{ + struct rocker_tlv *cmd_stats; + + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, + ROCKER_TLV_CMD_TYPE_GET_PORT_STATS)) + return -EMSGSIZE; + + cmd_stats = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); + if (!cmd_stats) + return -EMSGSIZE; + + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_STATS_PPORT, + rocker_port->pport)) + return -EMSGSIZE; + + rocker_tlv_nest_end(desc_info, cmd_stats); + + return 0; +} + +static int +rocker_cmd_get_port_stats_ethtool_proc(const struct rocker_port *rocker_port, + const struct rocker_desc_info *desc_info, + void *priv) +{ + const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1]; + const struct rocker_tlv *stats_attrs[ROCKER_TLV_CMD_PORT_STATS_MAX + 1]; + const struct rocker_tlv *pattr; + u32 pport; + u64 *data = priv; + int i; + + rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info); + + if (!attrs[ROCKER_TLV_CMD_INFO]) + return -EIO; + + rocker_tlv_parse_nested(stats_attrs, ROCKER_TLV_CMD_PORT_STATS_MAX, + attrs[ROCKER_TLV_CMD_INFO]); + + if (!stats_attrs[ROCKER_TLV_CMD_PORT_STATS_PPORT]) + return -EIO; + + pport = rocker_tlv_get_u32(stats_attrs[ROCKER_TLV_CMD_PORT_STATS_PPORT]); + if (pport != rocker_port->pport) + return -EIO; + + for (i = 0; i < ARRAY_SIZE(rocker_port_stats); i++) { + pattr = stats_attrs[rocker_port_stats[i].type]; + if (!pattr) + continue; + + data[i] = rocker_tlv_get_u64(pattr); + } + + return 0; +} + +static int rocker_cmd_get_port_stats_ethtool(struct rocker_port *rocker_port, + void *priv) +{ + return rocker_cmd_exec(rocker_port, false, + rocker_cmd_get_port_stats_prep, NULL, + rocker_cmd_get_port_stats_ethtool_proc, + priv); +} + +static void rocker_port_get_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 *data) +{ + struct rocker_port *rocker_port = netdev_priv(dev); + + if (rocker_cmd_get_port_stats_ethtool(rocker_port, data) != 0) { + int i; + + for (i = 0; i < ARRAY_SIZE(rocker_port_stats); ++i) + data[i] = 0; + } +} + +static int rocker_port_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return ROCKER_PORT_STATS_LEN; + default: + return -EOPNOTSUPP; + } +} + +static const struct ethtool_ops rocker_port_ethtool_ops = { + .get_settings = rocker_port_get_settings, + .set_settings = 
rocker_port_set_settings, + .get_drvinfo = rocker_port_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_strings = rocker_port_get_strings, + .get_ethtool_stats = rocker_port_get_stats, + .get_sset_count = rocker_port_get_sset_count, +}; + +/***************** + * NAPI interface + *****************/ + +static struct rocker_port *rocker_port_napi_tx_get(struct napi_struct *napi) +{ + return container_of(napi, struct rocker_port, napi_tx); +} + +static int rocker_port_poll_tx(struct napi_struct *napi, int budget) +{ + struct rocker_port *rocker_port = rocker_port_napi_tx_get(napi); + const struct rocker *rocker = rocker_port->rocker; + const struct rocker_desc_info *desc_info; + u32 credits = 0; + int err; + + /* Cleanup tx descriptors */ + while ((desc_info = rocker_desc_tail_get(&rocker_port->tx_ring))) { + struct sk_buff *skb; + + err = rocker_desc_err(desc_info); + if (err && net_ratelimit()) + netdev_err(rocker_port->dev, "tx desc received with err %d\n", + err); + rocker_tx_desc_frags_unmap(rocker_port, desc_info); + + skb = rocker_desc_cookie_ptr_get(desc_info); + if (err == 0) { + rocker_port->dev->stats.tx_packets++; + rocker_port->dev->stats.tx_bytes += skb->len; + } else { + rocker_port->dev->stats.tx_errors++; + } + + dev_kfree_skb_any(skb); + credits++; + } + + if (credits && netif_queue_stopped(rocker_port->dev)) + netif_wake_queue(rocker_port->dev); + + napi_complete(napi); + rocker_dma_ring_credits_set(rocker, &rocker_port->tx_ring, credits); + + return 0; +} + +static int rocker_port_rx_proc(const struct rocker *rocker, + const struct rocker_port *rocker_port, + struct rocker_desc_info *desc_info) +{ + const struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1]; + struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info); + size_t rx_len; + u16 rx_flags = 0; + + if (!skb) + return -ENOENT; + + rocker_tlv_parse_desc(attrs, ROCKER_TLV_RX_MAX, desc_info); + if (!attrs[ROCKER_TLV_RX_FRAG_LEN]) + return -EINVAL; + if (attrs[ROCKER_TLV_RX_FLAGS]) + rx_flags = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FLAGS]); + + rocker_dma_rx_ring_skb_unmap(rocker, attrs); + + rx_len = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FRAG_LEN]); + skb_put(skb, rx_len); + skb->protocol = eth_type_trans(skb, rocker_port->dev); + + if (rx_flags & ROCKER_RX_FLAGS_FWD_OFFLOAD) + skb->offload_fwd_mark = rocker_port->dev->offload_fwd_mark; + + rocker_port->dev->stats.rx_packets++; + rocker_port->dev->stats.rx_bytes += skb->len; + + netif_receive_skb(skb); + + return rocker_dma_rx_ring_skb_alloc(rocker_port, desc_info); +} + +static struct rocker_port *rocker_port_napi_rx_get(struct napi_struct *napi) +{ + return container_of(napi, struct rocker_port, napi_rx); +} + +static int rocker_port_poll_rx(struct napi_struct *napi, int budget) +{ + struct rocker_port *rocker_port = rocker_port_napi_rx_get(napi); + const struct rocker *rocker = rocker_port->rocker; + struct rocker_desc_info *desc_info; + u32 credits = 0; + int err; + + /* Process rx descriptors */ + while (credits < budget && + (desc_info = rocker_desc_tail_get(&rocker_port->rx_ring))) { + err = rocker_desc_err(desc_info); + if (err) { + if (net_ratelimit()) + netdev_err(rocker_port->dev, "rx desc received with err %d\n", + err); + } else { + err = rocker_port_rx_proc(rocker, rocker_port, + desc_info); + if (err && net_ratelimit()) + netdev_err(rocker_port->dev, "rx processing failed with err %d\n", + err); + } + if (err) + rocker_port->dev->stats.rx_errors++; + + rocker_desc_gen_clear(desc_info); + rocker_desc_head_set(rocker, &rocker_port->rx_ring, 
desc_info); + credits++; + } + + if (credits < budget) + napi_complete(napi); + + rocker_dma_ring_credits_set(rocker, &rocker_port->rx_ring, credits); + + return credits; +} + +/***************** + * PCI driver ops + *****************/ + +static void rocker_carrier_init(const struct rocker_port *rocker_port) +{ + const struct rocker *rocker = rocker_port->rocker; + u64 link_status = rocker_read64(rocker, PORT_PHYS_LINK_STATUS); + bool link_up; + + link_up = link_status & (1 << rocker_port->pport); + if (link_up) + netif_carrier_on(rocker_port->dev); + else + netif_carrier_off(rocker_port->dev); +} + +static void rocker_remove_ports(struct rocker *rocker) +{ + struct rocker_port *rocker_port; + int i; + + for (i = 0; i < rocker->port_count; i++) { + rocker_port = rocker->ports[i]; + if (!rocker_port) + continue; + rocker_world_port_fini(rocker_port); + unregister_netdev(rocker_port->dev); + rocker_world_port_post_fini(rocker_port); + free_netdev(rocker_port->dev); + } + rocker_world_fini(rocker); + kfree(rocker->ports); +} + +static void rocker_port_dev_addr_init(struct rocker_port *rocker_port) +{ + const struct rocker *rocker = rocker_port->rocker; + const struct pci_dev *pdev = rocker->pdev; + int err; + + err = rocker_cmd_get_port_settings_macaddr(rocker_port, + rocker_port->dev->dev_addr); + if (err) { + dev_warn(&pdev->dev, "failed to get mac address, using random\n"); + eth_hw_addr_random(rocker_port->dev); + } +} + +static int rocker_probe_port(struct rocker *rocker, unsigned int port_number) +{ + const struct pci_dev *pdev = rocker->pdev; + struct rocker_port *rocker_port; + struct net_device *dev; + int err; + + dev = alloc_etherdev(sizeof(struct rocker_port)); + if (!dev) + return -ENOMEM; + rocker_port = netdev_priv(dev); + rocker_port->dev = dev; + rocker_port->rocker = rocker; + rocker_port->port_number = port_number; + rocker_port->pport = port_number + 1; + + err = rocker_world_check_init(rocker_port); + if (err) { + dev_err(&pdev->dev, "world init failed\n"); + goto err_world_check_init; + } + + rocker_port_dev_addr_init(rocker_port); + dev->netdev_ops = &rocker_port_netdev_ops; + dev->ethtool_ops = &rocker_port_ethtool_ops; + dev->switchdev_ops = &rocker_port_switchdev_ops; + netif_tx_napi_add(dev, &rocker_port->napi_tx, rocker_port_poll_tx, + NAPI_POLL_WEIGHT); + netif_napi_add(dev, &rocker_port->napi_rx, rocker_port_poll_rx, + NAPI_POLL_WEIGHT); + rocker_carrier_init(rocker_port); + + dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_SG; + + err = rocker_world_port_pre_init(rocker_port); + if (err) { + dev_err(&pdev->dev, "port world pre-init failed\n"); + goto err_world_port_pre_init; + } + err = register_netdev(dev); + if (err) { + dev_err(&pdev->dev, "register_netdev failed\n"); + goto err_register_netdev; + } + rocker->ports[port_number] = rocker_port; + + err = rocker_world_port_init(rocker_port); + if (err) { + dev_err(&pdev->dev, "port world init failed\n"); + goto err_world_port_init; + } + + return 0; + +err_world_port_init: + rocker->ports[port_number] = NULL; + unregister_netdev(dev); +err_register_netdev: + rocker_world_port_post_fini(rocker_port); +err_world_port_pre_init: +err_world_check_init: + free_netdev(dev); + return err; +} + +static int rocker_probe_ports(struct rocker *rocker) +{ + int i; + size_t alloc_size; + int err; + + alloc_size = sizeof(struct rocker_port *) * rocker->port_count; + rocker->ports = kzalloc(alloc_size, GFP_KERNEL); + if (!rocker->ports) + return -ENOMEM; + for (i = 0; i < rocker->port_count; i++) { + err = 
rocker_probe_port(rocker, i); + if (err) + goto remove_ports; + } + return 0; + +remove_ports: + rocker_remove_ports(rocker); + return err; +} + +static int rocker_msix_init(struct rocker *rocker) +{ + struct pci_dev *pdev = rocker->pdev; + int msix_entries; + int i; + int err; + + msix_entries = pci_msix_vec_count(pdev); + if (msix_entries < 0) + return msix_entries; + + if (msix_entries != ROCKER_MSIX_VEC_COUNT(rocker->port_count)) + return -EINVAL; + + rocker->msix_entries = kmalloc_array(msix_entries, + sizeof(struct msix_entry), + GFP_KERNEL); + if (!rocker->msix_entries) + return -ENOMEM; + + for (i = 0; i < msix_entries; i++) + rocker->msix_entries[i].entry = i; + + err = pci_enable_msix_exact(pdev, rocker->msix_entries, msix_entries); + if (err < 0) + goto err_enable_msix; + + return 0; + +err_enable_msix: + kfree(rocker->msix_entries); + return err; +} + +static void rocker_msix_fini(const struct rocker *rocker) +{ + pci_disable_msix(rocker->pdev); + kfree(rocker->msix_entries); +} + +static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct rocker *rocker; + int err; + + rocker = kzalloc(sizeof(*rocker), GFP_KERNEL); + if (!rocker) + return -ENOMEM; + + err = pci_enable_device(pdev); + if (err) { + dev_err(&pdev->dev, "pci_enable_device failed\n"); + goto err_pci_enable_device; + } + + err = pci_request_regions(pdev, rocker_driver_name); + if (err) { + dev_err(&pdev->dev, "pci_request_regions failed\n"); + goto err_pci_request_regions; + } + + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); + if (!err) { + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); + if (err) { + dev_err(&pdev->dev, "pci_set_consistent_dma_mask failed\n"); + goto err_pci_set_dma_mask; + } + } else { + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (err) { + dev_err(&pdev->dev, "pci_set_dma_mask failed\n"); + goto err_pci_set_dma_mask; + } + } + + if (pci_resource_len(pdev, 0) < ROCKER_PCI_BAR0_SIZE) { + dev_err(&pdev->dev, "invalid PCI region size\n"); + err = -EINVAL; + goto err_pci_resource_len_check; + } + + rocker->hw_addr = ioremap(pci_resource_start(pdev, 0), + pci_resource_len(pdev, 0)); + if (!rocker->hw_addr) { + dev_err(&pdev->dev, "ioremap failed\n"); + err = -EIO; + goto err_ioremap; + } + pci_set_master(pdev); + + rocker->pdev = pdev; + pci_set_drvdata(pdev, rocker); + + rocker->port_count = rocker_read32(rocker, PORT_PHYS_COUNT); + + err = rocker_msix_init(rocker); + if (err) { + dev_err(&pdev->dev, "MSI-X init failed\n"); + goto err_msix_init; + } + + err = rocker_basic_hw_test(rocker); + if (err) { + dev_err(&pdev->dev, "basic hw test failed\n"); + goto err_basic_hw_test; + } + + rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET); + + err = rocker_dma_rings_init(rocker); + if (err) + goto err_dma_rings_init; + + err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), + rocker_cmd_irq_handler, 0, + rocker_driver_name, rocker); + if (err) { + dev_err(&pdev->dev, "cannot assign cmd irq\n"); + goto err_request_cmd_irq; + } + + err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), + rocker_event_irq_handler, 0, + rocker_driver_name, rocker); + if (err) { + dev_err(&pdev->dev, "cannot assign event irq\n"); + goto err_request_event_irq; + } + + rocker->hw.id = rocker_read64(rocker, SWITCH_ID); + + err = rocker_probe_ports(rocker); + if (err) { + dev_err(&pdev->dev, "failed to probe ports\n"); + goto err_probe_ports; + } + + dev_info(&pdev->dev, "Rocker switch with id %*phN\n", + (int)sizeof(rocker->hw.id), &rocker->hw.id); 
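+ /* the error labels below unwind the probe steps above in exact reverse order */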
+ + return 0; + +err_probe_ports: + free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker); +err_request_event_irq: + free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), rocker); +err_request_cmd_irq: + rocker_dma_rings_fini(rocker); +err_dma_rings_init: +err_basic_hw_test: + rocker_msix_fini(rocker); +err_msix_init: + iounmap(rocker->hw_addr); +err_ioremap: +err_pci_resource_len_check: +err_pci_set_dma_mask: + pci_release_regions(pdev); +err_pci_request_regions: + pci_disable_device(pdev); +err_pci_enable_device: + kfree(rocker); + return err; +} + +static void rocker_remove(struct pci_dev *pdev) +{ + struct rocker *rocker = pci_get_drvdata(pdev); + + rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET); + rocker_remove_ports(rocker); + free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker); + free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), rocker); + rocker_dma_rings_fini(rocker); + rocker_msix_fini(rocker); + iounmap(rocker->hw_addr); + pci_release_regions(rocker->pdev); + pci_disable_device(rocker->pdev); + kfree(rocker); +} + +static struct pci_driver rocker_pci_driver = { + .name = rocker_driver_name, + .id_table = rocker_pci_id_table, + .probe = rocker_probe, + .remove = rocker_remove, +}; + +/************************************ + * Net device notifier event handler + ************************************/ + +static bool rocker_port_dev_check(const struct net_device *dev) +{ + return dev->netdev_ops == &rocker_port_netdev_ops; +} + +static int rocker_netdevice_event(struct notifier_block *unused, + unsigned long event, void *ptr) +{ + struct net_device *dev = netdev_notifier_info_to_dev(ptr); + struct netdev_notifier_changeupper_info *info; + struct rocker_port *rocker_port; + int err; + + if (!rocker_port_dev_check(dev)) + return NOTIFY_DONE; + + switch (event) { + case NETDEV_CHANGEUPPER: + info = ptr; + if (!info->master) + goto out; + rocker_port = netdev_priv(dev); + if (info->linking) { + err = rocker_world_port_master_linked(rocker_port, + info->upper_dev); + if (err) + netdev_warn(dev, "failed to reflect master linked (err %d)\n", + err); + } else { + err = rocker_world_port_master_unlinked(rocker_port, + info->upper_dev); + if (err) + netdev_warn(dev, "failed to reflect master unlinked (err %d)\n", + err); + } + } +out: + return NOTIFY_DONE; +} + +static struct notifier_block rocker_netdevice_nb __read_mostly = { + .notifier_call = rocker_netdevice_event, +}; + +/************************************ + * Net event notifier event handler + ************************************/ + +static int rocker_netevent_event(struct notifier_block *unused, + unsigned long event, void *ptr) +{ + struct rocker_port *rocker_port; + struct net_device *dev; + struct neighbour *n = ptr; + int err; + + switch (event) { + case NETEVENT_NEIGH_UPDATE: + if (n->tbl != &arp_tbl) + return NOTIFY_DONE; + dev = n->dev; + if (!rocker_port_dev_check(dev)) + return NOTIFY_DONE; + rocker_port = netdev_priv(dev); + err = rocker_world_port_neigh_update(rocker_port, n); + if (err) + netdev_warn(dev, "failed to handle neigh update (err %d)\n", + err); + break; + } + + return NOTIFY_DONE; +} + +static struct notifier_block rocker_netevent_nb __read_mostly = { + .notifier_call = rocker_netevent_event, +}; + +/*********************** + * Module init and exit + ***********************/ + +static int __init rocker_module_init(void) +{ + int err; + + register_netdevice_notifier(&rocker_netdevice_nb); + register_netevent_notifier(&rocker_netevent_nb); + err = 
pci_register_driver(&rocker_pci_driver); + if (err) + goto err_pci_register_driver; + return 0; + +err_pci_register_driver: + unregister_netevent_notifier(&rocker_netevent_nb); + unregister_netdevice_notifier(&rocker_netdevice_nb); + return err; +} + +static void __exit rocker_module_exit(void) +{ + unregister_netevent_notifier(&rocker_netevent_nb); + unregister_netdevice_notifier(&rocker_netdevice_nb); + pci_unregister_driver(&rocker_pci_driver); +} + +module_init(rocker_module_init); +module_exit(rocker_module_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>"); +MODULE_AUTHOR("Scott Feldman <sfeldma@gmail.com>"); +MODULE_DESCRIPTION("Rocker switch device driver"); +MODULE_DEVICE_TABLE(pci, rocker_pci_id_table); diff --git a/drivers/net/ethernet/rocker/rocker_ofdpa.c b/drivers/net/ethernet/rocker/rocker_ofdpa.c new file mode 100644 index 000000000000..099008a53b03 --- /dev/null +++ b/drivers/net/ethernet/rocker/rocker_ofdpa.c @@ -0,0 +1,2952 @@ +/* + * drivers/net/ethernet/rocker/rocker_ofdpa.c - Rocker switch OF-DPA-like + * implementation + * Copyright (c) 2014 Scott Feldman <sfeldma@gmail.com> + * Copyright (c) 2014-2016 Jiri Pirko <jiri@mellanox.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/spinlock.h> +#include <linux/hashtable.h> +#include <linux/crc32.h> +#include <linux/netdevice.h> +#include <linux/inetdevice.h> +#include <linux/if_vlan.h> +#include <linux/if_bridge.h> +#include <net/neighbour.h> +#include <net/switchdev.h> +#include <net/ip_fib.h> +#include <net/arp.h> + +#include "rocker.h" +#include "rocker_tlv.h" + +struct ofdpa_flow_tbl_key { + u32 priority; + enum rocker_of_dpa_table_id tbl_id; + union { + struct { + u32 in_pport; + u32 in_pport_mask; + enum rocker_of_dpa_table_id goto_tbl; + } ig_port; + struct { + u32 in_pport; + __be16 vlan_id; + __be16 vlan_id_mask; + enum rocker_of_dpa_table_id goto_tbl; + bool untagged; + __be16 new_vlan_id; + } vlan; + struct { + u32 in_pport; + u32 in_pport_mask; + __be16 eth_type; + u8 eth_dst[ETH_ALEN]; + u8 eth_dst_mask[ETH_ALEN]; + __be16 vlan_id; + __be16 vlan_id_mask; + enum rocker_of_dpa_table_id goto_tbl; + bool copy_to_cpu; + } term_mac; + struct { + __be16 eth_type; + __be32 dst4; + __be32 dst4_mask; + enum rocker_of_dpa_table_id goto_tbl; + u32 group_id; + } ucast_routing; + struct { + u8 eth_dst[ETH_ALEN]; + u8 eth_dst_mask[ETH_ALEN]; + int has_eth_dst; + int has_eth_dst_mask; + __be16 vlan_id; + u32 tunnel_id; + enum rocker_of_dpa_table_id goto_tbl; + u32 group_id; + bool copy_to_cpu; + } bridge; + struct { + u32 in_pport; + u32 in_pport_mask; + u8 eth_src[ETH_ALEN]; + u8 eth_src_mask[ETH_ALEN]; + u8 eth_dst[ETH_ALEN]; + u8 eth_dst_mask[ETH_ALEN]; + __be16 eth_type; + __be16 vlan_id; + __be16 vlan_id_mask; + u8 ip_proto; + u8 ip_proto_mask; + u8 ip_tos; + u8 ip_tos_mask; + u32 group_id; + } acl; + }; +}; + +struct ofdpa_flow_tbl_entry { + struct hlist_node entry; + u32 cmd; + u64 cookie; + struct ofdpa_flow_tbl_key key; + size_t key_len; + u32 key_crc32; /* key */ +}; + +struct ofdpa_group_tbl_entry { + struct hlist_node entry; + u32 cmd; + u32 group_id; /* key */ + u16 group_count; + u32 *group_ids; + union { + struct { + u8 pop_vlan; + } l2_interface; + struct { + u8 eth_src[ETH_ALEN]; + u8 
eth_dst[ETH_ALEN]; + __be16 vlan_id; + u32 group_id; + } l2_rewrite; + struct { + u8 eth_src[ETH_ALEN]; + u8 eth_dst[ETH_ALEN]; + __be16 vlan_id; + bool ttl_check; + u32 group_id; + } l3_unicast; + }; +}; + +struct ofdpa_fdb_tbl_entry { + struct hlist_node entry; + u32 key_crc32; /* key */ + bool learned; + unsigned long touched; + struct ofdpa_fdb_tbl_key { + struct ofdpa_port *ofdpa_port; + u8 addr[ETH_ALEN]; + __be16 vlan_id; + } key; +}; + +struct ofdpa_internal_vlan_tbl_entry { + struct hlist_node entry; + int ifindex; /* key */ + u32 ref_count; + __be16 vlan_id; +}; + +struct ofdpa_neigh_tbl_entry { + struct hlist_node entry; + __be32 ip_addr; /* key */ + struct net_device *dev; + u32 ref_count; + u32 index; + u8 eth_dst[ETH_ALEN]; + bool ttl_check; +}; + +enum { + OFDPA_CTRL_LINK_LOCAL_MCAST, + OFDPA_CTRL_LOCAL_ARP, + OFDPA_CTRL_IPV4_MCAST, + OFDPA_CTRL_IPV6_MCAST, + OFDPA_CTRL_DFLT_BRIDGING, + OFDPA_CTRL_DFLT_OVS, + OFDPA_CTRL_MAX, +}; + +#define OFDPA_INTERNAL_VLAN_ID_BASE 0x0f00 +#define OFDPA_N_INTERNAL_VLANS 255 +#define OFDPA_VLAN_BITMAP_LEN BITS_TO_LONGS(VLAN_N_VID) +#define OFDPA_INTERNAL_VLAN_BITMAP_LEN BITS_TO_LONGS(OFDPA_N_INTERNAL_VLANS) +#define OFDPA_UNTAGGED_VID 0 + +struct ofdpa { + struct rocker *rocker; + DECLARE_HASHTABLE(flow_tbl, 16); + spinlock_t flow_tbl_lock; /* for flow tbl accesses */ + u64 flow_tbl_next_cookie; + DECLARE_HASHTABLE(group_tbl, 16); + spinlock_t group_tbl_lock; /* for group tbl accesses */ + struct timer_list fdb_cleanup_timer; + DECLARE_HASHTABLE(fdb_tbl, 16); + spinlock_t fdb_tbl_lock; /* for fdb tbl accesses */ + unsigned long internal_vlan_bitmap[OFDPA_INTERNAL_VLAN_BITMAP_LEN]; + DECLARE_HASHTABLE(internal_vlan_tbl, 8); + spinlock_t internal_vlan_tbl_lock; /* for vlan tbl accesses */ + DECLARE_HASHTABLE(neigh_tbl, 16); + spinlock_t neigh_tbl_lock; /* for neigh tbl accesses */ + u32 neigh_tbl_next_index; +}; + +struct ofdpa_port { + struct ofdpa *ofdpa; + struct rocker_port *rocker_port; + struct net_device *dev; + u32 pport; + struct net_device *bridge_dev; + __be16 internal_vlan_id; + int stp_state; + u32 brport_flags; + unsigned long ageing_time; + bool ctrls[OFDPA_CTRL_MAX]; + unsigned long vlan_bitmap[OFDPA_VLAN_BITMAP_LEN]; +}; + +static const u8 zero_mac[ETH_ALEN] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; +static const u8 ff_mac[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; +static const u8 ll_mac[ETH_ALEN] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 }; +static const u8 ll_mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0 }; +static const u8 mcast_mac[ETH_ALEN] = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 }; +static const u8 ipv4_mcast[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x00 }; +static const u8 ipv4_mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0x80, 0x00, 0x00 }; +static const u8 ipv6_mcast[ETH_ALEN] = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x00 }; +static const u8 ipv6_mask[ETH_ALEN] = { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 }; + +/* Rocker priority levels for flow table entries. Higher + * priority match takes precedence over lower priority match. 
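+ * For example, when both an OFDPA_PRIORITY_ACL_CTRL (3) entry and an + * OFDPA_PRIORITY_ACL_NORMAL (2) entry match the same packet, the + * ACL_CTRL entry is the one that takes effect.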
+ */ + +enum { + OFDPA_PRIORITY_UNKNOWN = 0, + OFDPA_PRIORITY_IG_PORT = 1, + OFDPA_PRIORITY_VLAN = 1, + OFDPA_PRIORITY_TERM_MAC_UCAST = 0, + OFDPA_PRIORITY_TERM_MAC_MCAST = 1, + OFDPA_PRIORITY_BRIDGING_VLAN_DFLT_EXACT = 1, + OFDPA_PRIORITY_BRIDGING_VLAN_DFLT_WILD = 2, + OFDPA_PRIORITY_BRIDGING_VLAN = 3, + OFDPA_PRIORITY_BRIDGING_TENANT_DFLT_EXACT = 1, + OFDPA_PRIORITY_BRIDGING_TENANT_DFLT_WILD = 2, + OFDPA_PRIORITY_BRIDGING_TENANT = 3, + OFDPA_PRIORITY_ACL_CTRL = 3, + OFDPA_PRIORITY_ACL_NORMAL = 2, + OFDPA_PRIORITY_ACL_DFLT = 1, +}; + +static bool ofdpa_vlan_id_is_internal(__be16 vlan_id) +{ + u16 start = OFDPA_INTERNAL_VLAN_ID_BASE; + u16 end = 0xffe; + u16 _vlan_id = ntohs(vlan_id); + + return (_vlan_id >= start && _vlan_id <= end); +} + +static __be16 ofdpa_port_vid_to_vlan(const struct ofdpa_port *ofdpa_port, + u16 vid, bool *pop_vlan) +{ + __be16 vlan_id; + + if (pop_vlan) + *pop_vlan = false; + vlan_id = htons(vid); + if (!vlan_id) { + vlan_id = ofdpa_port->internal_vlan_id; + if (pop_vlan) + *pop_vlan = true; + } + + return vlan_id; +} + +static u16 ofdpa_port_vlan_to_vid(const struct ofdpa_port *ofdpa_port, + __be16 vlan_id) +{ + if (ofdpa_vlan_id_is_internal(vlan_id)) + return 0; + + return ntohs(vlan_id); +} + +static bool ofdpa_port_is_slave(const struct ofdpa_port *ofdpa_port, + const char *kind) +{ + return ofdpa_port->bridge_dev && + !strcmp(ofdpa_port->bridge_dev->rtnl_link_ops->kind, kind); +} + +static bool ofdpa_port_is_bridged(const struct ofdpa_port *ofdpa_port) +{ + return ofdpa_port_is_slave(ofdpa_port, "bridge"); +} + +static bool ofdpa_port_is_ovsed(const struct ofdpa_port *ofdpa_port) +{ + return ofdpa_port_is_slave(ofdpa_port, "openvswitch"); +} + +#define OFDPA_OP_FLAG_REMOVE BIT(0) +#define OFDPA_OP_FLAG_NOWAIT BIT(1) +#define OFDPA_OP_FLAG_LEARNED BIT(2) +#define OFDPA_OP_FLAG_REFRESH BIT(3) + +static bool ofdpa_flags_nowait(int flags) +{ + return flags & OFDPA_OP_FLAG_NOWAIT; +} + +static void *__ofdpa_mem_alloc(struct switchdev_trans *trans, int flags, + size_t size) +{ + struct switchdev_trans_item *elem = NULL; + gfp_t gfp_flags = (flags & OFDPA_OP_FLAG_NOWAIT) ? + GFP_ATOMIC : GFP_KERNEL; + + /* If in transaction prepare phase, allocate the memory + * and enqueue it on a transaction. If in transaction + * commit phase, dequeue the memory from the transaction + * rather than re-allocating the memory. The idea is the + * driver code paths for prepare and commit are identical + * so the memory allocated in the prepare phase is the + * memory used in the commit phase. + */ + + if (!trans) { + elem = kzalloc(size + sizeof(*elem), gfp_flags); + } else if (switchdev_trans_ph_prepare(trans)) { + elem = kzalloc(size + sizeof(*elem), gfp_flags); + if (!elem) + return NULL; + switchdev_trans_item_enqueue(trans, elem, kfree, elem); + } else { + elem = switchdev_trans_item_dequeue(trans); + } + + return elem ? elem + 1 : NULL; +} + +static void *ofdpa_kzalloc(struct switchdev_trans *trans, int flags, + size_t size) +{ + return __ofdpa_mem_alloc(trans, flags, size); +} + +static void *ofdpa_kcalloc(struct switchdev_trans *trans, int flags, + size_t n, size_t size) +{ + return __ofdpa_mem_alloc(trans, flags, n * size); +} + +static void ofdpa_kfree(struct switchdev_trans *trans, const void *mem) +{ + struct switchdev_trans_item *elem; + + /* Frees are ignored if in transaction prepare phase. The + * memory remains on the per-port list until freed in the + * commit phase. 
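+ * + * Paired with __ofdpa_mem_alloc() above: the prepare phase allocates + * and enqueues the item, and the identical code path in the commit + * phase dequeues that same item instead of allocating, so the commit + * phase can never fail on memory allocation.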
+ */ + + if (switchdev_trans_ph_prepare(trans)) + return; + + elem = (struct switchdev_trans_item *) mem - 1; + kfree(elem); +} + +/************************************************************* + * Flow, group, FDB, internal VLAN and neigh command prepares + *************************************************************/ + +static int +ofdpa_cmd_flow_tbl_add_ig_port(struct rocker_desc_info *desc_info, + const struct ofdpa_flow_tbl_entry *entry) +{ + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT, + entry->key.ig_port.in_pport)) + return -EMSGSIZE; + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK, + entry->key.ig_port.in_pport_mask)) + return -EMSGSIZE; + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID, + entry->key.ig_port.goto_tbl)) + return -EMSGSIZE; + + return 0; +} + +static int +ofdpa_cmd_flow_tbl_add_vlan(struct rocker_desc_info *desc_info, + const struct ofdpa_flow_tbl_entry *entry) +{ + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT, + entry->key.vlan.in_pport)) + return -EMSGSIZE; + if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID, + entry->key.vlan.vlan_id)) + return -EMSGSIZE; + if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK, + entry->key.vlan.vlan_id_mask)) + return -EMSGSIZE; + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID, + entry->key.vlan.goto_tbl)) + return -EMSGSIZE; + if (entry->key.vlan.untagged && + rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_NEW_VLAN_ID, + entry->key.vlan.new_vlan_id)) + return -EMSGSIZE; + + return 0; +} + +static int +ofdpa_cmd_flow_tbl_add_term_mac(struct rocker_desc_info *desc_info, + const struct ofdpa_flow_tbl_entry *entry) +{ + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT, + entry->key.term_mac.in_pport)) + return -EMSGSIZE; + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK, + entry->key.term_mac.in_pport_mask)) + return -EMSGSIZE; + if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE, + entry->key.term_mac.eth_type)) + return -EMSGSIZE; + if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC, + ETH_ALEN, entry->key.term_mac.eth_dst)) + return -EMSGSIZE; + if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK, + ETH_ALEN, entry->key.term_mac.eth_dst_mask)) + return -EMSGSIZE; + if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID, + entry->key.term_mac.vlan_id)) + return -EMSGSIZE; + if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK, + entry->key.term_mac.vlan_id_mask)) + return -EMSGSIZE; + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID, + entry->key.term_mac.goto_tbl)) + return -EMSGSIZE; + if (entry->key.term_mac.copy_to_cpu && + rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION, + entry->key.term_mac.copy_to_cpu)) + return -EMSGSIZE; + + return 0; +} + +static int +ofdpa_cmd_flow_tbl_add_ucast_routing(struct rocker_desc_info *desc_info, + const struct ofdpa_flow_tbl_entry *entry) +{ + if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE, + entry->key.ucast_routing.eth_type)) + return -EMSGSIZE; + if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP, + entry->key.ucast_routing.dst4)) + return -EMSGSIZE; + if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP_MASK, + entry->key.ucast_routing.dst4_mask)) + return -EMSGSIZE; + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID, + entry->key.ucast_routing.goto_tbl)) + return -EMSGSIZE; + if (rocker_tlv_put_u32(desc_info, 
ROCKER_TLV_OF_DPA_GROUP_ID, + entry->key.ucast_routing.group_id)) + return -EMSGSIZE; + + return 0; +} + +static int +ofdpa_cmd_flow_tbl_add_bridge(struct rocker_desc_info *desc_info, + const struct ofdpa_flow_tbl_entry *entry) +{ + if (entry->key.bridge.has_eth_dst && + rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC, + ETH_ALEN, entry->key.bridge.eth_dst)) + return -EMSGSIZE; + if (entry->key.bridge.has_eth_dst_mask && + rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK, + ETH_ALEN, entry->key.bridge.eth_dst_mask)) + return -EMSGSIZE; + if (entry->key.bridge.vlan_id && + rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID, + entry->key.bridge.vlan_id)) + return -EMSGSIZE; + if (entry->key.bridge.tunnel_id && + rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_TUNNEL_ID, + entry->key.bridge.tunnel_id)) + return -EMSGSIZE; + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID, + entry->key.bridge.goto_tbl)) + return -EMSGSIZE; + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID, + entry->key.bridge.group_id)) + return -EMSGSIZE; + if (entry->key.bridge.copy_to_cpu && + rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION, + entry->key.bridge.copy_to_cpu)) + return -EMSGSIZE; + + return 0; +} + +static int +ofdpa_cmd_flow_tbl_add_acl(struct rocker_desc_info *desc_info, + const struct ofdpa_flow_tbl_entry *entry) +{ + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT, + entry->key.acl.in_pport)) + return -EMSGSIZE; + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK, + entry->key.acl.in_pport_mask)) + return -EMSGSIZE; + if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC, + ETH_ALEN, entry->key.acl.eth_src)) + return -EMSGSIZE; + if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC_MASK, + ETH_ALEN, entry->key.acl.eth_src_mask)) + return -EMSGSIZE; + if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC, + ETH_ALEN, entry->key.acl.eth_dst)) + return -EMSGSIZE; + if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK, + ETH_ALEN, entry->key.acl.eth_dst_mask)) + return -EMSGSIZE; + if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE, + entry->key.acl.eth_type)) + return -EMSGSIZE; + if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID, + entry->key.acl.vlan_id)) + return -EMSGSIZE; + if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK, + entry->key.acl.vlan_id_mask)) + return -EMSGSIZE; + + switch (ntohs(entry->key.acl.eth_type)) { + case ETH_P_IP: + case ETH_P_IPV6: + if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_PROTO, + entry->key.acl.ip_proto)) + return -EMSGSIZE; + if (rocker_tlv_put_u8(desc_info, + ROCKER_TLV_OF_DPA_IP_PROTO_MASK, + entry->key.acl.ip_proto_mask)) + return -EMSGSIZE; + if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_DSCP, + entry->key.acl.ip_tos & 0x3f)) + return -EMSGSIZE; + if (rocker_tlv_put_u8(desc_info, + ROCKER_TLV_OF_DPA_IP_DSCP_MASK, + entry->key.acl.ip_tos_mask & 0x3f)) + return -EMSGSIZE; + if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_ECN, + (entry->key.acl.ip_tos & 0xc0) >> 6)) + return -EMSGSIZE; + if (rocker_tlv_put_u8(desc_info, + ROCKER_TLV_OF_DPA_IP_ECN_MASK, + (entry->key.acl.ip_tos_mask & 0xc0) >> 6)) + return -EMSGSIZE; + break; + } + + if (entry->key.acl.group_id != ROCKER_GROUP_NONE && + rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID, + entry->key.acl.group_id)) + return -EMSGSIZE; + + return 0; +} + +static int ofdpa_cmd_flow_tbl_add(const struct rocker_port *rocker_port, + struct 
rocker_desc_info *desc_info, + void *priv) +{ + const struct ofdpa_flow_tbl_entry *entry = priv; + struct rocker_tlv *cmd_info; + int err = 0; + + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd)) + return -EMSGSIZE; + cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); + if (!cmd_info) + return -EMSGSIZE; + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_TABLE_ID, + entry->key.tbl_id)) + return -EMSGSIZE; + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_PRIORITY, + entry->key.priority)) + return -EMSGSIZE; + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_HARDTIME, 0)) + return -EMSGSIZE; + if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE, + entry->cookie)) + return -EMSGSIZE; + + switch (entry->key.tbl_id) { + case ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT: + err = ofdpa_cmd_flow_tbl_add_ig_port(desc_info, entry); + break; + case ROCKER_OF_DPA_TABLE_ID_VLAN: + err = ofdpa_cmd_flow_tbl_add_vlan(desc_info, entry); + break; + case ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC: + err = ofdpa_cmd_flow_tbl_add_term_mac(desc_info, entry); + break; + case ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING: + err = ofdpa_cmd_flow_tbl_add_ucast_routing(desc_info, entry); + break; + case ROCKER_OF_DPA_TABLE_ID_BRIDGING: + err = ofdpa_cmd_flow_tbl_add_bridge(desc_info, entry); + break; + case ROCKER_OF_DPA_TABLE_ID_ACL_POLICY: + err = ofdpa_cmd_flow_tbl_add_acl(desc_info, entry); + break; + default: + err = -ENOTSUPP; + break; + } + + if (err) + return err; + + rocker_tlv_nest_end(desc_info, cmd_info); + + return 0; +} + +static int ofdpa_cmd_flow_tbl_del(const struct rocker_port *rocker_port, + struct rocker_desc_info *desc_info, + void *priv) +{ + const struct ofdpa_flow_tbl_entry *entry = priv; + struct rocker_tlv *cmd_info; + + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd)) + return -EMSGSIZE; + cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); + if (!cmd_info) + return -EMSGSIZE; + if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE, + entry->cookie)) + return -EMSGSIZE; + rocker_tlv_nest_end(desc_info, cmd_info); + + return 0; +} + +static int +ofdpa_cmd_group_tbl_add_l2_interface(struct rocker_desc_info *desc_info, + struct ofdpa_group_tbl_entry *entry) +{ + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_OUT_PPORT, + ROCKER_GROUP_PORT_GET(entry->group_id))) + return -EMSGSIZE; + if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_POP_VLAN, + entry->l2_interface.pop_vlan)) + return -EMSGSIZE; + + return 0; +} + +static int +ofdpa_cmd_group_tbl_add_l2_rewrite(struct rocker_desc_info *desc_info, + const struct ofdpa_group_tbl_entry *entry) +{ + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER, + entry->l2_rewrite.group_id)) + return -EMSGSIZE; + if (!is_zero_ether_addr(entry->l2_rewrite.eth_src) && + rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC, + ETH_ALEN, entry->l2_rewrite.eth_src)) + return -EMSGSIZE; + if (!is_zero_ether_addr(entry->l2_rewrite.eth_dst) && + rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC, + ETH_ALEN, entry->l2_rewrite.eth_dst)) + return -EMSGSIZE; + if (entry->l2_rewrite.vlan_id && + rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID, + entry->l2_rewrite.vlan_id)) + return -EMSGSIZE; + + return 0; +} + +static int +ofdpa_cmd_group_tbl_add_group_ids(struct rocker_desc_info *desc_info, + const struct ofdpa_group_tbl_entry *entry) +{ + int i; + struct rocker_tlv *group_ids; + + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GROUP_COUNT, + entry->group_count)) + 
return -EMSGSIZE; + + group_ids = rocker_tlv_nest_start(desc_info, + ROCKER_TLV_OF_DPA_GROUP_IDS); + if (!group_ids) + return -EMSGSIZE; + + for (i = 0; i < entry->group_count; i++) + /* Note TLV array is 1-based */ + if (rocker_tlv_put_u32(desc_info, i + 1, entry->group_ids[i])) + return -EMSGSIZE; + + rocker_tlv_nest_end(desc_info, group_ids); + + return 0; +} + +static int +ofdpa_cmd_group_tbl_add_l3_unicast(struct rocker_desc_info *desc_info, + const struct ofdpa_group_tbl_entry *entry) +{ + if (!is_zero_ether_addr(entry->l3_unicast.eth_src) && + rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC, + ETH_ALEN, entry->l3_unicast.eth_src)) + return -EMSGSIZE; + if (!is_zero_ether_addr(entry->l3_unicast.eth_dst) && + rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC, + ETH_ALEN, entry->l3_unicast.eth_dst)) + return -EMSGSIZE; + if (entry->l3_unicast.vlan_id && + rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID, + entry->l3_unicast.vlan_id)) + return -EMSGSIZE; + if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_TTL_CHECK, + entry->l3_unicast.ttl_check)) + return -EMSGSIZE; + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER, + entry->l3_unicast.group_id)) + return -EMSGSIZE; + + return 0; +} + +static int ofdpa_cmd_group_tbl_add(const struct rocker_port *rocker_port, + struct rocker_desc_info *desc_info, + void *priv) +{ + struct ofdpa_group_tbl_entry *entry = priv; + struct rocker_tlv *cmd_info; + int err = 0; + + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd)) + return -EMSGSIZE; + cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); + if (!cmd_info) + return -EMSGSIZE; + + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID, + entry->group_id)) + return -EMSGSIZE; + + switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) { + case ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE: + err = ofdpa_cmd_group_tbl_add_l2_interface(desc_info, entry); + break; + case ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE: + err = ofdpa_cmd_group_tbl_add_l2_rewrite(desc_info, entry); + break; + case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD: + case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST: + err = ofdpa_cmd_group_tbl_add_group_ids(desc_info, entry); + break; + case ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST: + err = ofdpa_cmd_group_tbl_add_l3_unicast(desc_info, entry); + break; + default: + err = -ENOTSUPP; + break; + } + + if (err) + return err; + + rocker_tlv_nest_end(desc_info, cmd_info); + + return 0; +} + +static int ofdpa_cmd_group_tbl_del(const struct rocker_port *rocker_port, + struct rocker_desc_info *desc_info, + void *priv) +{ + const struct ofdpa_group_tbl_entry *entry = priv; + struct rocker_tlv *cmd_info; + + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd)) + return -EMSGSIZE; + cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); + if (!cmd_info) + return -EMSGSIZE; + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID, + entry->group_id)) + return -EMSGSIZE; + rocker_tlv_nest_end(desc_info, cmd_info); + + return 0; +} + +/*************************************************** + * Flow, group, FDB, internal VLAN and neigh tables + ***************************************************/ + +static struct ofdpa_flow_tbl_entry * +ofdpa_flow_tbl_find(const struct ofdpa *ofdpa, + const struct ofdpa_flow_tbl_entry *match) +{ + struct ofdpa_flow_tbl_entry *found; + size_t key_len = match->key_len ? 
match->key_len : sizeof(found->key); + + hash_for_each_possible(ofdpa->flow_tbl, found, + entry, match->key_crc32) { + if (memcmp(&found->key, &match->key, key_len) == 0) + return found; + } + + return NULL; +} + +static int ofdpa_flow_tbl_add(struct ofdpa_port *ofdpa_port, + struct switchdev_trans *trans, int flags, + struct ofdpa_flow_tbl_entry *match) +{ + struct ofdpa *ofdpa = ofdpa_port->ofdpa; + struct ofdpa_flow_tbl_entry *found; + size_t key_len = match->key_len ? match->key_len : sizeof(found->key); + unsigned long lock_flags; + + match->key_crc32 = crc32(~0, &match->key, key_len); + + spin_lock_irqsave(&ofdpa->flow_tbl_lock, lock_flags); + + found = ofdpa_flow_tbl_find(ofdpa, match); + + if (found) { + match->cookie = found->cookie; + if (!switchdev_trans_ph_prepare(trans)) + hash_del(&found->entry); + ofdpa_kfree(trans, found); + found = match; + found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD; + } else { + found = match; + found->cookie = ofdpa->flow_tbl_next_cookie++; + found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD; + } + + if (!switchdev_trans_ph_prepare(trans)) + hash_add(ofdpa->flow_tbl, &found->entry, found->key_crc32); + + spin_unlock_irqrestore(&ofdpa->flow_tbl_lock, lock_flags); + + if (!switchdev_trans_ph_prepare(trans)) + return rocker_cmd_exec(ofdpa_port->rocker_port, + ofdpa_flags_nowait(flags), + ofdpa_cmd_flow_tbl_add, + found, NULL, NULL); + return 0; +} + +static int ofdpa_flow_tbl_del(struct ofdpa_port *ofdpa_port, + struct switchdev_trans *trans, int flags, + struct ofdpa_flow_tbl_entry *match) +{ + struct ofdpa *ofdpa = ofdpa_port->ofdpa; + struct ofdpa_flow_tbl_entry *found; + size_t key_len = match->key_len ? match->key_len : sizeof(found->key); + unsigned long lock_flags; + int err = 0; + + match->key_crc32 = crc32(~0, &match->key, key_len); + + spin_lock_irqsave(&ofdpa->flow_tbl_lock, lock_flags); + + found = ofdpa_flow_tbl_find(ofdpa, match); + + if (found) { + if (!switchdev_trans_ph_prepare(trans)) + hash_del(&found->entry); + found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL; + } + + spin_unlock_irqrestore(&ofdpa->flow_tbl_lock, lock_flags); + + ofdpa_kfree(trans, match); + + if (found) { + if (!switchdev_trans_ph_prepare(trans)) + err = rocker_cmd_exec(ofdpa_port->rocker_port, + ofdpa_flags_nowait(flags), + ofdpa_cmd_flow_tbl_del, + found, NULL, NULL); + ofdpa_kfree(trans, found); + } + + return err; +} + +static int ofdpa_flow_tbl_do(struct ofdpa_port *ofdpa_port, + struct switchdev_trans *trans, int flags, + struct ofdpa_flow_tbl_entry *entry) +{ + if (flags & OFDPA_OP_FLAG_REMOVE) + return ofdpa_flow_tbl_del(ofdpa_port, trans, flags, entry); + else + return ofdpa_flow_tbl_add(ofdpa_port, trans, flags, entry); +} + +static int ofdpa_flow_tbl_ig_port(struct ofdpa_port *ofdpa_port, + struct switchdev_trans *trans, int flags, + u32 in_pport, u32 in_pport_mask, + enum rocker_of_dpa_table_id goto_tbl) +{ + struct ofdpa_flow_tbl_entry *entry; + + entry = ofdpa_kzalloc(trans, flags, sizeof(*entry)); + if (!entry) + return -ENOMEM; + + entry->key.priority = OFDPA_PRIORITY_IG_PORT; + entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT; + entry->key.ig_port.in_pport = in_pport; + entry->key.ig_port.in_pport_mask = in_pport_mask; + entry->key.ig_port.goto_tbl = goto_tbl; + + return ofdpa_flow_tbl_do(ofdpa_port, trans, flags, entry); +} + +static int ofdpa_flow_tbl_vlan(struct ofdpa_port *ofdpa_port, + struct switchdev_trans *trans, int flags, + u32 in_pport, __be16 vlan_id, + __be16 vlan_id_mask, + enum rocker_of_dpa_table_id goto_tbl, + bool untagged, 
__be16 new_vlan_id) +{ + struct ofdpa_flow_tbl_entry *entry; + + entry = ofdpa_kzalloc(trans, flags, sizeof(*entry)); + if (!entry) + return -ENOMEM; + + entry->key.priority = OFDPA_PRIORITY_VLAN; + entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_VLAN; + entry->key.vlan.in_pport = in_pport; + entry->key.vlan.vlan_id = vlan_id; + entry->key.vlan.vlan_id_mask = vlan_id_mask; + entry->key.vlan.goto_tbl = goto_tbl; + + entry->key.vlan.untagged = untagged; + entry->key.vlan.new_vlan_id = new_vlan_id; + + return ofdpa_flow_tbl_do(ofdpa_port, trans, flags, entry); +} + +static int ofdpa_flow_tbl_term_mac(struct ofdpa_port *ofdpa_port, + struct switchdev_trans *trans, + u32 in_pport, u32 in_pport_mask, + __be16 eth_type, const u8 *eth_dst, + const u8 *eth_dst_mask, __be16 vlan_id, + __be16 vlan_id_mask, bool copy_to_cpu, + int flags) +{ + struct ofdpa_flow_tbl_entry *entry; + + entry = ofdpa_kzalloc(trans, flags, sizeof(*entry)); + if (!entry) + return -ENOMEM; + + if (is_multicast_ether_addr(eth_dst)) { + entry->key.priority = OFDPA_PRIORITY_TERM_MAC_MCAST; + entry->key.term_mac.goto_tbl = + ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING; + } else { + entry->key.priority = OFDPA_PRIORITY_TERM_MAC_UCAST; + entry->key.term_mac.goto_tbl = + ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING; + } + + entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC; + entry->key.term_mac.in_pport = in_pport; + entry->key.term_mac.in_pport_mask = in_pport_mask; + entry->key.term_mac.eth_type = eth_type; + ether_addr_copy(entry->key.term_mac.eth_dst, eth_dst); + ether_addr_copy(entry->key.term_mac.eth_dst_mask, eth_dst_mask); + entry->key.term_mac.vlan_id = vlan_id; + entry->key.term_mac.vlan_id_mask = vlan_id_mask; + entry->key.term_mac.copy_to_cpu = copy_to_cpu; + + return ofdpa_flow_tbl_do(ofdpa_port, trans, flags, entry); +} + +static int ofdpa_flow_tbl_bridge(struct ofdpa_port *ofdpa_port, + struct switchdev_trans *trans, int flags, + const u8 *eth_dst, const u8 *eth_dst_mask, + __be16 vlan_id, u32 tunnel_id, + enum rocker_of_dpa_table_id goto_tbl, + u32 group_id, bool copy_to_cpu) +{ + struct ofdpa_flow_tbl_entry *entry; + u32 priority; + bool vlan_bridging = !!vlan_id; + bool dflt = !eth_dst || (eth_dst && eth_dst_mask); + bool wild = false; + + entry = ofdpa_kzalloc(trans, flags, sizeof(*entry)); + if (!entry) + return -ENOMEM; + + entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_BRIDGING; + + if (eth_dst) { + entry->key.bridge.has_eth_dst = 1; + ether_addr_copy(entry->key.bridge.eth_dst, eth_dst); + } + if (eth_dst_mask) { + entry->key.bridge.has_eth_dst_mask = 1; + ether_addr_copy(entry->key.bridge.eth_dst_mask, eth_dst_mask); + if (!ether_addr_equal(eth_dst_mask, ff_mac)) + wild = true; + } + + priority = OFDPA_PRIORITY_UNKNOWN; + if (vlan_bridging && dflt && wild) + priority = OFDPA_PRIORITY_BRIDGING_VLAN_DFLT_WILD; + else if (vlan_bridging && dflt && !wild) + priority = OFDPA_PRIORITY_BRIDGING_VLAN_DFLT_EXACT; + else if (vlan_bridging && !dflt) + priority = OFDPA_PRIORITY_BRIDGING_VLAN; + else if (!vlan_bridging && dflt && wild) + priority = OFDPA_PRIORITY_BRIDGING_TENANT_DFLT_WILD; + else if (!vlan_bridging && dflt && !wild) + priority = OFDPA_PRIORITY_BRIDGING_TENANT_DFLT_EXACT; + else if (!vlan_bridging && !dflt) + priority = OFDPA_PRIORITY_BRIDGING_TENANT; + + entry->key.priority = priority; + entry->key.bridge.vlan_id = vlan_id; + entry->key.bridge.tunnel_id = tunnel_id; + entry->key.bridge.goto_tbl = goto_tbl; + entry->key.bridge.group_id = group_id; + entry->key.bridge.copy_to_cpu = copy_to_cpu; + + return 
ofdpa_flow_tbl_do(ofdpa_port, trans, flags, entry); +} + +static int ofdpa_flow_tbl_ucast4_routing(struct ofdpa_port *ofdpa_port, + struct switchdev_trans *trans, + __be16 eth_type, __be32 dst, + __be32 dst_mask, u32 priority, + enum rocker_of_dpa_table_id goto_tbl, + u32 group_id, int flags) +{ + struct ofdpa_flow_tbl_entry *entry; + + entry = ofdpa_kzalloc(trans, flags, sizeof(*entry)); + if (!entry) + return -ENOMEM; + + entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING; + entry->key.priority = priority; + entry->key.ucast_routing.eth_type = eth_type; + entry->key.ucast_routing.dst4 = dst; + entry->key.ucast_routing.dst4_mask = dst_mask; + entry->key.ucast_routing.goto_tbl = goto_tbl; + entry->key.ucast_routing.group_id = group_id; + entry->key_len = offsetof(struct ofdpa_flow_tbl_key, + ucast_routing.group_id); + + return ofdpa_flow_tbl_do(ofdpa_port, trans, flags, entry); +} + +static int ofdpa_flow_tbl_acl(struct ofdpa_port *ofdpa_port, + struct switchdev_trans *trans, int flags, + u32 in_pport, u32 in_pport_mask, + const u8 *eth_src, const u8 *eth_src_mask, + const u8 *eth_dst, const u8 *eth_dst_mask, + __be16 eth_type, __be16 vlan_id, + __be16 vlan_id_mask, u8 ip_proto, + u8 ip_proto_mask, u8 ip_tos, u8 ip_tos_mask, + u32 group_id) +{ + u32 priority; + struct ofdpa_flow_tbl_entry *entry; + + entry = ofdpa_kzalloc(trans, flags, sizeof(*entry)); + if (!entry) + return -ENOMEM; + + priority = OFDPA_PRIORITY_ACL_NORMAL; + if (eth_dst && eth_dst_mask) { + if (ether_addr_equal(eth_dst_mask, mcast_mac)) + priority = OFDPA_PRIORITY_ACL_DFLT; + else if (is_link_local_ether_addr(eth_dst)) + priority = OFDPA_PRIORITY_ACL_CTRL; + } + + entry->key.priority = priority; + entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_ACL_POLICY; + entry->key.acl.in_pport = in_pport; + entry->key.acl.in_pport_mask = in_pport_mask; + + if (eth_src) + ether_addr_copy(entry->key.acl.eth_src, eth_src); + if (eth_src_mask) + ether_addr_copy(entry->key.acl.eth_src_mask, eth_src_mask); + if (eth_dst) + ether_addr_copy(entry->key.acl.eth_dst, eth_dst); + if (eth_dst_mask) + ether_addr_copy(entry->key.acl.eth_dst_mask, eth_dst_mask); + + entry->key.acl.eth_type = eth_type; + entry->key.acl.vlan_id = vlan_id; + entry->key.acl.vlan_id_mask = vlan_id_mask; + entry->key.acl.ip_proto = ip_proto; + entry->key.acl.ip_proto_mask = ip_proto_mask; + entry->key.acl.ip_tos = ip_tos; + entry->key.acl.ip_tos_mask = ip_tos_mask; + entry->key.acl.group_id = group_id; + + return ofdpa_flow_tbl_do(ofdpa_port, trans, flags, entry); +} + +static struct ofdpa_group_tbl_entry * +ofdpa_group_tbl_find(const struct ofdpa *ofdpa, + const struct ofdpa_group_tbl_entry *match) +{ + struct ofdpa_group_tbl_entry *found; + + hash_for_each_possible(ofdpa->group_tbl, found, + entry, match->group_id) { + if (found->group_id == match->group_id) + return found; + } + + return NULL; +} + +static void ofdpa_group_tbl_entry_free(struct switchdev_trans *trans, + struct ofdpa_group_tbl_entry *entry) +{ + switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) { + case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD: + case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST: + ofdpa_kfree(trans, entry->group_ids); + break; + default: + break; + } + ofdpa_kfree(trans, entry); +} + +static int ofdpa_group_tbl_add(struct ofdpa_port *ofdpa_port, + struct switchdev_trans *trans, int flags, + struct ofdpa_group_tbl_entry *match) +{ + struct ofdpa *ofdpa = ofdpa_port->ofdpa; + struct ofdpa_group_tbl_entry *found; + unsigned long lock_flags; + + spin_lock_irqsave(&ofdpa->group_tbl_lock, lock_flags); + + 
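+	/* Add-or-replace: if an entry with this group ID already exists,
+	 * it is unhashed and freed, and the replacement goes to the
+	 * hardware as a GROUP_MOD command rather than a GROUP_ADD.
+	 */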
found = ofdpa_group_tbl_find(ofdpa, match); + + if (found) { + if (!switchdev_trans_ph_prepare(trans)) + hash_del(&found->entry); + ofdpa_group_tbl_entry_free(trans, found); + found = match; + found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD; + } else { + found = match; + found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD; + } + + if (!switchdev_trans_ph_prepare(trans)) + hash_add(ofdpa->group_tbl, &found->entry, found->group_id); + + spin_unlock_irqrestore(&ofdpa->group_tbl_lock, lock_flags); + + if (!switchdev_trans_ph_prepare(trans)) + return rocker_cmd_exec(ofdpa_port->rocker_port, + ofdpa_flags_nowait(flags), + ofdpa_cmd_group_tbl_add, + found, NULL, NULL); + return 0; +} + +static int ofdpa_group_tbl_del(struct ofdpa_port *ofdpa_port, + struct switchdev_trans *trans, int flags, + struct ofdpa_group_tbl_entry *match) +{ + struct ofdpa *ofdpa = ofdpa_port->ofdpa; + struct ofdpa_group_tbl_entry *found; + unsigned long lock_flags; + int err = 0; + + spin_lock_irqsave(&ofdpa->group_tbl_lock, lock_flags); + + found = ofdpa_group_tbl_find(ofdpa, match); + + if (found) { + if (!switchdev_trans_ph_prepare(trans)) + hash_del(&found->entry); + found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL; + } + + spin_unlock_irqrestore(&ofdpa->group_tbl_lock, lock_flags); + + ofdpa_group_tbl_entry_free(trans, match); + + if (found) { + if (!switchdev_trans_ph_prepare(trans)) + err = rocker_cmd_exec(ofdpa_port->rocker_port, + ofdpa_flags_nowait(flags), + ofdpa_cmd_group_tbl_del, + found, NULL, NULL); + ofdpa_group_tbl_entry_free(trans, found); + } + + return err; +} + +static int ofdpa_group_tbl_do(struct ofdpa_port *ofdpa_port, + struct switchdev_trans *trans, int flags, + struct ofdpa_group_tbl_entry *entry) +{ + if (flags & OFDPA_OP_FLAG_REMOVE) + return ofdpa_group_tbl_del(ofdpa_port, trans, flags, entry); + else + return ofdpa_group_tbl_add(ofdpa_port, trans, flags, entry); +} + +static int ofdpa_group_l2_interface(struct ofdpa_port *ofdpa_port, + struct switchdev_trans *trans, int flags, + __be16 vlan_id, u32 out_pport, + int pop_vlan) +{ + struct ofdpa_group_tbl_entry *entry; + + entry = ofdpa_kzalloc(trans, flags, sizeof(*entry)); + if (!entry) + return -ENOMEM; + + entry->group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport); + entry->l2_interface.pop_vlan = pop_vlan; + + return ofdpa_group_tbl_do(ofdpa_port, trans, flags, entry); +} + +static int ofdpa_group_l2_fan_out(struct ofdpa_port *ofdpa_port, + struct switchdev_trans *trans, + int flags, u8 group_count, + const u32 *group_ids, u32 group_id) +{ + struct ofdpa_group_tbl_entry *entry; + + entry = ofdpa_kzalloc(trans, flags, sizeof(*entry)); + if (!entry) + return -ENOMEM; + + entry->group_id = group_id; + entry->group_count = group_count; + + entry->group_ids = ofdpa_kcalloc(trans, flags, + group_count, sizeof(u32)); + if (!entry->group_ids) { + ofdpa_kfree(trans, entry); + return -ENOMEM; + } + memcpy(entry->group_ids, group_ids, group_count * sizeof(u32)); + + return ofdpa_group_tbl_do(ofdpa_port, trans, flags, entry); +} + +static int ofdpa_group_l2_flood(struct ofdpa_port *ofdpa_port, + struct switchdev_trans *trans, int flags, + __be16 vlan_id, u8 group_count, + const u32 *group_ids, u32 group_id) +{ + return ofdpa_group_l2_fan_out(ofdpa_port, trans, flags, + group_count, group_ids, + group_id); +} + +static int ofdpa_group_l3_unicast(struct ofdpa_port *ofdpa_port, + struct switchdev_trans *trans, int flags, + u32 index, const u8 *src_mac, const u8 *dst_mac, + __be16 vlan_id, bool ttl_check, u32 pport) +{ + struct ofdpa_group_tbl_entry 
*entry; + + entry = ofdpa_kzalloc(trans, flags, sizeof(*entry)); + if (!entry) + return -ENOMEM; + + entry->group_id = ROCKER_GROUP_L3_UNICAST(index); + if (src_mac) + ether_addr_copy(entry->l3_unicast.eth_src, src_mac); + if (dst_mac) + ether_addr_copy(entry->l3_unicast.eth_dst, dst_mac); + entry->l3_unicast.vlan_id = vlan_id; + entry->l3_unicast.ttl_check = ttl_check; + entry->l3_unicast.group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, pport); + + return ofdpa_group_tbl_do(ofdpa_port, trans, flags, entry); +} + +static struct ofdpa_neigh_tbl_entry * +ofdpa_neigh_tbl_find(const struct ofdpa *ofdpa, __be32 ip_addr) +{ + struct ofdpa_neigh_tbl_entry *found; + + hash_for_each_possible(ofdpa->neigh_tbl, found, + entry, be32_to_cpu(ip_addr)) + if (found->ip_addr == ip_addr) + return found; + + return NULL; +} + +static void ofdpa_neigh_add(struct ofdpa *ofdpa, + struct switchdev_trans *trans, + struct ofdpa_neigh_tbl_entry *entry) +{ + if (!switchdev_trans_ph_commit(trans)) + entry->index = ofdpa->neigh_tbl_next_index++; + if (switchdev_trans_ph_prepare(trans)) + return; + entry->ref_count++; + hash_add(ofdpa->neigh_tbl, &entry->entry, + be32_to_cpu(entry->ip_addr)); +} + +static void ofdpa_neigh_del(struct switchdev_trans *trans, + struct ofdpa_neigh_tbl_entry *entry) +{ + if (switchdev_trans_ph_prepare(trans)) + return; + if (--entry->ref_count == 0) { + hash_del(&entry->entry); + ofdpa_kfree(trans, entry); + } +} + +static void ofdpa_neigh_update(struct ofdpa_neigh_tbl_entry *entry, + struct switchdev_trans *trans, + const u8 *eth_dst, bool ttl_check) +{ + if (eth_dst) { + ether_addr_copy(entry->eth_dst, eth_dst); + entry->ttl_check = ttl_check; + } else if (!switchdev_trans_ph_prepare(trans)) { + entry->ref_count++; + } +} + +static int ofdpa_port_ipv4_neigh(struct ofdpa_port *ofdpa_port, + struct switchdev_trans *trans, + int flags, __be32 ip_addr, const u8 *eth_dst) +{ + struct ofdpa *ofdpa = ofdpa_port->ofdpa; + struct ofdpa_neigh_tbl_entry *entry; + struct ofdpa_neigh_tbl_entry *found; + unsigned long lock_flags; + __be16 eth_type = htons(ETH_P_IP); + enum rocker_of_dpa_table_id goto_tbl = + ROCKER_OF_DPA_TABLE_ID_ACL_POLICY; + u32 group_id; + u32 priority = 0; + bool adding = !(flags & OFDPA_OP_FLAG_REMOVE); + bool updating; + bool removing; + int err = 0; + + entry = ofdpa_kzalloc(trans, flags, sizeof(*entry)); + if (!entry) + return -ENOMEM; + + spin_lock_irqsave(&ofdpa->neigh_tbl_lock, lock_flags); + + found = ofdpa_neigh_tbl_find(ofdpa, ip_addr); + + updating = found && adding; + removing = found && !adding; + adding = !found && adding; + + if (adding) { + entry->ip_addr = ip_addr; + entry->dev = ofdpa_port->dev; + ether_addr_copy(entry->eth_dst, eth_dst); + entry->ttl_check = true; + ofdpa_neigh_add(ofdpa, trans, entry); + } else if (removing) { + memcpy(entry, found, sizeof(*entry)); + ofdpa_neigh_del(trans, found); + } else if (updating) { + ofdpa_neigh_update(found, trans, eth_dst, true); + memcpy(entry, found, sizeof(*entry)); + } else { + err = -ENOENT; + } + + spin_unlock_irqrestore(&ofdpa->neigh_tbl_lock, lock_flags); + + if (err) + goto err_out; + + /* For each active neighbor, we have an L3 unicast group and + * a /32 route to the neighbor, which uses the L3 unicast + * group. The L3 unicast group can also be referred to by + * other routes' nexthops. 
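+	 * Keeping a single shared L3 unicast group per neighbor means a
+	 * neighbor MAC change is pushed as one group update, without
+	 * rewriting the routes whose nexthops reference that group.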
+	 */
+
+	err = ofdpa_group_l3_unicast(ofdpa_port, trans, flags,
+				     entry->index,
+				     ofdpa_port->dev->dev_addr,
+				     entry->eth_dst,
+				     ofdpa_port->internal_vlan_id,
+				     entry->ttl_check,
+				     ofdpa_port->pport);
+	if (err) {
+		netdev_err(ofdpa_port->dev, "Error (%d) L3 unicast group index %d\n",
+			   err, entry->index);
+		goto err_out;
+	}
+
+	if (adding || removing) {
+		group_id = ROCKER_GROUP_L3_UNICAST(entry->index);
+		err = ofdpa_flow_tbl_ucast4_routing(ofdpa_port, trans,
+						    eth_type, ip_addr,
+						    inet_make_mask(32),
+						    priority, goto_tbl,
+						    group_id, flags);
+
+		if (err)
+			netdev_err(ofdpa_port->dev, "Error (%d) /32 unicast route %pI4 group 0x%08x\n",
+				   err, &entry->ip_addr, group_id);
+	}
+
+err_out:
+	if (!adding)
+		ofdpa_kfree(trans, entry);
+
+	return err;
+}
+
+static int ofdpa_port_ipv4_resolve(struct ofdpa_port *ofdpa_port,
+				   struct switchdev_trans *trans,
+				   __be32 ip_addr)
+{
+	struct net_device *dev = ofdpa_port->dev;
+	struct neighbour *n = __ipv4_neigh_lookup(dev, (__force u32)ip_addr);
+	int err = 0;
+
+	if (!n) {
+		n = neigh_create(&arp_tbl, &ip_addr, dev);
+		if (IS_ERR(n))
+			return PTR_ERR(n);
+	}
+
+	/* If the neigh is already resolved, then go ahead and
+	 * install the entry, otherwise start the ARP process to
+	 * resolve the neigh.
+	 */
+
+	if (n->nud_state & NUD_VALID)
+		err = ofdpa_port_ipv4_neigh(ofdpa_port, trans, 0,
+					    ip_addr, n->ha);
+	else
+		neigh_event_send(n, NULL);
+
+	neigh_release(n);
+	return err;
+}
+
+static int ofdpa_port_ipv4_nh(struct ofdpa_port *ofdpa_port,
+			      struct switchdev_trans *trans, int flags,
+			      __be32 ip_addr, u32 *index)
+{
+	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
+	struct ofdpa_neigh_tbl_entry *entry;
+	struct ofdpa_neigh_tbl_entry *found;
+	unsigned long lock_flags;
+	bool adding = !(flags & OFDPA_OP_FLAG_REMOVE);
+	bool updating;
+	bool removing;
+	bool resolved = true;
+	int err = 0;
+
+	entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
+	if (!entry)
+		return -ENOMEM;
+
+	spin_lock_irqsave(&ofdpa->neigh_tbl_lock, lock_flags);
+
+	found = ofdpa_neigh_tbl_find(ofdpa, ip_addr);
+	if (found)
+		*index = found->index;
+
+	updating = found && adding;
+	removing = found && !adding;
+	adding = !found && adding;
+
+	if (adding) {
+		entry->ip_addr = ip_addr;
+		entry->dev = ofdpa_port->dev;
+		ofdpa_neigh_add(ofdpa, trans, entry);
+		*index = entry->index;
+		resolved = false;
+	} else if (removing) {
+		ofdpa_neigh_del(trans, found);
+	} else if (updating) {
+		ofdpa_neigh_update(found, trans, NULL, false);
+		resolved = !is_zero_ether_addr(found->eth_dst);
+	} else {
+		err = -ENOENT;
+	}
+
+	spin_unlock_irqrestore(&ofdpa->neigh_tbl_lock, lock_flags);
+
+	if (!adding)
+		ofdpa_kfree(trans, entry);
+
+	if (err)
+		return err;
+
+	/* Resolved means neigh ip_addr is resolved to neigh mac. */
+
+	if (!resolved)
+		err = ofdpa_port_ipv4_resolve(ofdpa_port, trans, ip_addr);
+
+	return err;
+}
+
+static struct ofdpa_port *ofdpa_port_get(const struct ofdpa *ofdpa,
+					 int port_index)
+{
+	struct rocker_port *rocker_port;
+
+	rocker_port = ofdpa->rocker->ports[port_index];
+	return rocker_port ?
rocker_port->wpriv : NULL; +} + +static int ofdpa_port_vlan_flood_group(struct ofdpa_port *ofdpa_port, + struct switchdev_trans *trans, + int flags, __be16 vlan_id) +{ + struct ofdpa_port *p; + const struct ofdpa *ofdpa = ofdpa_port->ofdpa; + unsigned int port_count = ofdpa->rocker->port_count; + u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0); + u32 *group_ids; + u8 group_count = 0; + int err = 0; + int i; + + group_ids = ofdpa_kcalloc(trans, flags, port_count, sizeof(u32)); + if (!group_ids) + return -ENOMEM; + + /* Adjust the flood group for this VLAN. The flood group + * references an L2 interface group for each port in this + * VLAN. + */ + + for (i = 0; i < port_count; i++) { + p = ofdpa_port_get(ofdpa, i); + if (!p) + continue; + if (!ofdpa_port_is_bridged(p)) + continue; + if (test_bit(ntohs(vlan_id), p->vlan_bitmap)) { + group_ids[group_count++] = + ROCKER_GROUP_L2_INTERFACE(vlan_id, p->pport); + } + } + + /* If there are no bridged ports in this VLAN, we're done */ + if (group_count == 0) + goto no_ports_in_vlan; + + err = ofdpa_group_l2_flood(ofdpa_port, trans, flags, vlan_id, + group_count, group_ids, group_id); + if (err) + netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 flood group\n", err); + +no_ports_in_vlan: + ofdpa_kfree(trans, group_ids); + return err; +} + +static int ofdpa_port_vlan_l2_groups(struct ofdpa_port *ofdpa_port, + struct switchdev_trans *trans, int flags, + __be16 vlan_id, bool pop_vlan) +{ + const struct ofdpa *ofdpa = ofdpa_port->ofdpa; + unsigned int port_count = ofdpa->rocker->port_count; + struct ofdpa_port *p; + bool adding = !(flags & OFDPA_OP_FLAG_REMOVE); + u32 out_pport; + int ref = 0; + int err; + int i; + + /* An L2 interface group for this port in this VLAN, but + * only when port STP state is LEARNING|FORWARDING. + */ + + if (ofdpa_port->stp_state == BR_STATE_LEARNING || + ofdpa_port->stp_state == BR_STATE_FORWARDING) { + out_pport = ofdpa_port->pport; + err = ofdpa_group_l2_interface(ofdpa_port, trans, flags, + vlan_id, out_pport, pop_vlan); + if (err) { + netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 group for pport %d\n", + err, out_pport); + return err; + } + } + + /* An L2 interface group for this VLAN to CPU port. + * Add when first port joins this VLAN and destroy when + * last port leaves this VLAN. 
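+	 * The caller has already toggled this port's bit in vlan_bitmap,
+	 * so the reference count taken below includes this port: ref == 1
+	 * while adding is the first join, and ref == 0 while removing is
+	 * the last leave.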
+ */ + + for (i = 0; i < port_count; i++) { + p = ofdpa_port_get(ofdpa, i); + if (p && test_bit(ntohs(vlan_id), p->vlan_bitmap)) + ref++; + } + + if ((!adding || ref != 1) && (adding || ref != 0)) + return 0; + + out_pport = 0; + err = ofdpa_group_l2_interface(ofdpa_port, trans, flags, + vlan_id, out_pport, pop_vlan); + if (err) { + netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 group for CPU port\n", err); + return err; + } + + return 0; +} + +static struct ofdpa_ctrl { + const u8 *eth_dst; + const u8 *eth_dst_mask; + __be16 eth_type; + bool acl; + bool bridge; + bool term; + bool copy_to_cpu; +} ofdpa_ctrls[] = { + [OFDPA_CTRL_LINK_LOCAL_MCAST] = { + /* pass link local multicast pkts up to CPU for filtering */ + .eth_dst = ll_mac, + .eth_dst_mask = ll_mask, + .acl = true, + }, + [OFDPA_CTRL_LOCAL_ARP] = { + /* pass local ARP pkts up to CPU */ + .eth_dst = zero_mac, + .eth_dst_mask = zero_mac, + .eth_type = htons(ETH_P_ARP), + .acl = true, + }, + [OFDPA_CTRL_IPV4_MCAST] = { + /* pass IPv4 mcast pkts up to CPU, RFC 1112 */ + .eth_dst = ipv4_mcast, + .eth_dst_mask = ipv4_mask, + .eth_type = htons(ETH_P_IP), + .term = true, + .copy_to_cpu = true, + }, + [OFDPA_CTRL_IPV6_MCAST] = { + /* pass IPv6 mcast pkts up to CPU, RFC 2464 */ + .eth_dst = ipv6_mcast, + .eth_dst_mask = ipv6_mask, + .eth_type = htons(ETH_P_IPV6), + .term = true, + .copy_to_cpu = true, + }, + [OFDPA_CTRL_DFLT_BRIDGING] = { + /* flood any pkts on vlan */ + .bridge = true, + .copy_to_cpu = true, + }, + [OFDPA_CTRL_DFLT_OVS] = { + /* pass all pkts up to CPU */ + .eth_dst = zero_mac, + .eth_dst_mask = zero_mac, + .acl = true, + }, +}; + +static int ofdpa_port_ctrl_vlan_acl(struct ofdpa_port *ofdpa_port, + struct switchdev_trans *trans, int flags, + const struct ofdpa_ctrl *ctrl, __be16 vlan_id) +{ + u32 in_pport = ofdpa_port->pport; + u32 in_pport_mask = 0xffffffff; + u32 out_pport = 0; + const u8 *eth_src = NULL; + const u8 *eth_src_mask = NULL; + __be16 vlan_id_mask = htons(0xffff); + u8 ip_proto = 0; + u8 ip_proto_mask = 0; + u8 ip_tos = 0; + u8 ip_tos_mask = 0; + u32 group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport); + int err; + + err = ofdpa_flow_tbl_acl(ofdpa_port, trans, flags, + in_pport, in_pport_mask, + eth_src, eth_src_mask, + ctrl->eth_dst, ctrl->eth_dst_mask, + ctrl->eth_type, + vlan_id, vlan_id_mask, + ip_proto, ip_proto_mask, + ip_tos, ip_tos_mask, + group_id); + + if (err) + netdev_err(ofdpa_port->dev, "Error (%d) ctrl ACL\n", err); + + return err; +} + +static int ofdpa_port_ctrl_vlan_bridge(struct ofdpa_port *ofdpa_port, + struct switchdev_trans *trans, + int flags, + const struct ofdpa_ctrl *ctrl, + __be16 vlan_id) +{ + enum rocker_of_dpa_table_id goto_tbl = + ROCKER_OF_DPA_TABLE_ID_ACL_POLICY; + u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0); + u32 tunnel_id = 0; + int err; + + if (!ofdpa_port_is_bridged(ofdpa_port)) + return 0; + + err = ofdpa_flow_tbl_bridge(ofdpa_port, trans, flags, + ctrl->eth_dst, ctrl->eth_dst_mask, + vlan_id, tunnel_id, + goto_tbl, group_id, ctrl->copy_to_cpu); + + if (err) + netdev_err(ofdpa_port->dev, "Error (%d) ctrl FLOOD\n", err); + + return err; +} + +static int ofdpa_port_ctrl_vlan_term(struct ofdpa_port *ofdpa_port, + struct switchdev_trans *trans, int flags, + const struct ofdpa_ctrl *ctrl, __be16 vlan_id) +{ + u32 in_pport_mask = 0xffffffff; + __be16 vlan_id_mask = htons(0xffff); + int err; + + if (ntohs(vlan_id) == 0) + vlan_id = ofdpa_port->internal_vlan_id; + + err = ofdpa_flow_tbl_term_mac(ofdpa_port, trans, + ofdpa_port->pport, in_pport_mask, + 
ctrl->eth_type, ctrl->eth_dst, + ctrl->eth_dst_mask, vlan_id, + vlan_id_mask, ctrl->copy_to_cpu, + flags); + + if (err) + netdev_err(ofdpa_port->dev, "Error (%d) ctrl term\n", err); + + return err; +} + +static int ofdpa_port_ctrl_vlan(struct ofdpa_port *ofdpa_port, + struct switchdev_trans *trans, int flags, + const struct ofdpa_ctrl *ctrl, __be16 vlan_id) +{ + if (ctrl->acl) + return ofdpa_port_ctrl_vlan_acl(ofdpa_port, trans, flags, + ctrl, vlan_id); + if (ctrl->bridge) + return ofdpa_port_ctrl_vlan_bridge(ofdpa_port, trans, flags, + ctrl, vlan_id); + + if (ctrl->term) + return ofdpa_port_ctrl_vlan_term(ofdpa_port, trans, flags, + ctrl, vlan_id); + + return -EOPNOTSUPP; +} + +static int ofdpa_port_ctrl_vlan_add(struct ofdpa_port *ofdpa_port, + struct switchdev_trans *trans, int flags, + __be16 vlan_id) +{ + int err = 0; + int i; + + for (i = 0; i < OFDPA_CTRL_MAX; i++) { + if (ofdpa_port->ctrls[i]) { + err = ofdpa_port_ctrl_vlan(ofdpa_port, trans, flags, + &ofdpa_ctrls[i], vlan_id); + if (err) + return err; + } + } + + return err; +} + +static int ofdpa_port_ctrl(struct ofdpa_port *ofdpa_port, + struct switchdev_trans *trans, int flags, + const struct ofdpa_ctrl *ctrl) +{ + u16 vid; + int err = 0; + + for (vid = 1; vid < VLAN_N_VID; vid++) { + if (!test_bit(vid, ofdpa_port->vlan_bitmap)) + continue; + err = ofdpa_port_ctrl_vlan(ofdpa_port, trans, flags, + ctrl, htons(vid)); + if (err) + break; + } + + return err; +} + +static int ofdpa_port_vlan(struct ofdpa_port *ofdpa_port, + struct switchdev_trans *trans, int flags, u16 vid) +{ + enum rocker_of_dpa_table_id goto_tbl = + ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC; + u32 in_pport = ofdpa_port->pport; + __be16 vlan_id = htons(vid); + __be16 vlan_id_mask = htons(0xffff); + __be16 internal_vlan_id; + bool untagged; + bool adding = !(flags & OFDPA_OP_FLAG_REMOVE); + int err; + + internal_vlan_id = ofdpa_port_vid_to_vlan(ofdpa_port, vid, &untagged); + + if (adding && + test_bit(ntohs(internal_vlan_id), ofdpa_port->vlan_bitmap)) + return 0; /* already added */ + else if (!adding && + !test_bit(ntohs(internal_vlan_id), ofdpa_port->vlan_bitmap)) + return 0; /* already removed */ + + change_bit(ntohs(internal_vlan_id), ofdpa_port->vlan_bitmap); + + if (adding) { + err = ofdpa_port_ctrl_vlan_add(ofdpa_port, trans, flags, + internal_vlan_id); + if (err) { + netdev_err(ofdpa_port->dev, "Error (%d) port ctrl vlan add\n", err); + goto err_out; + } + } + + err = ofdpa_port_vlan_l2_groups(ofdpa_port, trans, flags, + internal_vlan_id, untagged); + if (err) { + netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 groups\n", err); + goto err_out; + } + + err = ofdpa_port_vlan_flood_group(ofdpa_port, trans, flags, + internal_vlan_id); + if (err) { + netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 flood group\n", err); + goto err_out; + } + + err = ofdpa_flow_tbl_vlan(ofdpa_port, trans, flags, + in_pport, vlan_id, vlan_id_mask, + goto_tbl, untagged, internal_vlan_id); + if (err) + netdev_err(ofdpa_port->dev, "Error (%d) port VLAN table\n", err); + +err_out: + if (switchdev_trans_ph_prepare(trans)) + change_bit(ntohs(internal_vlan_id), ofdpa_port->vlan_bitmap); + + return err; +} + +static int ofdpa_port_ig_tbl(struct ofdpa_port *ofdpa_port, + struct switchdev_trans *trans, int flags) +{ + enum rocker_of_dpa_table_id goto_tbl; + u32 in_pport; + u32 in_pport_mask; + int err; + + /* Normal Ethernet Frames. Matches pkts from any local physical + * ports. Goto VLAN tbl. 
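+	 * in_pport 0 with mask 0xffff0000 constrains only the upper 16
+	 * bits, so this single entry matches any local physical port
+	 * (e.g. pport 3: 3 & 0xffff0000 == 0 == in_pport).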
+ */ + + in_pport = 0; + in_pport_mask = 0xffff0000; + goto_tbl = ROCKER_OF_DPA_TABLE_ID_VLAN; + + err = ofdpa_flow_tbl_ig_port(ofdpa_port, trans, flags, + in_pport, in_pport_mask, + goto_tbl); + if (err) + netdev_err(ofdpa_port->dev, "Error (%d) ingress port table entry\n", err); + + return err; +} + +struct ofdpa_fdb_learn_work { + struct work_struct work; + struct ofdpa_port *ofdpa_port; + struct switchdev_trans *trans; + int flags; + u8 addr[ETH_ALEN]; + u16 vid; +}; + +static void ofdpa_port_fdb_learn_work(struct work_struct *work) +{ + const struct ofdpa_fdb_learn_work *lw = + container_of(work, struct ofdpa_fdb_learn_work, work); + bool removing = (lw->flags & OFDPA_OP_FLAG_REMOVE); + bool learned = (lw->flags & OFDPA_OP_FLAG_LEARNED); + struct switchdev_notifier_fdb_info info; + + info.addr = lw->addr; + info.vid = lw->vid; + + rtnl_lock(); + if (learned && removing) + call_switchdev_notifiers(SWITCHDEV_FDB_DEL, + lw->ofdpa_port->dev, &info.info); + else if (learned && !removing) + call_switchdev_notifiers(SWITCHDEV_FDB_ADD, + lw->ofdpa_port->dev, &info.info); + rtnl_unlock(); + + ofdpa_kfree(lw->trans, work); +} + +static int ofdpa_port_fdb_learn(struct ofdpa_port *ofdpa_port, + struct switchdev_trans *trans, int flags, + const u8 *addr, __be16 vlan_id) +{ + struct ofdpa_fdb_learn_work *lw; + enum rocker_of_dpa_table_id goto_tbl = + ROCKER_OF_DPA_TABLE_ID_ACL_POLICY; + u32 out_pport = ofdpa_port->pport; + u32 tunnel_id = 0; + u32 group_id = ROCKER_GROUP_NONE; + bool syncing = !!(ofdpa_port->brport_flags & BR_LEARNING_SYNC); + bool copy_to_cpu = false; + int err; + + if (ofdpa_port_is_bridged(ofdpa_port)) + group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport); + + if (!(flags & OFDPA_OP_FLAG_REFRESH)) { + err = ofdpa_flow_tbl_bridge(ofdpa_port, trans, flags, addr, + NULL, vlan_id, tunnel_id, goto_tbl, + group_id, copy_to_cpu); + if (err) + return err; + } + + if (!syncing) + return 0; + + if (!ofdpa_port_is_bridged(ofdpa_port)) + return 0; + + lw = ofdpa_kzalloc(trans, flags, sizeof(*lw)); + if (!lw) + return -ENOMEM; + + INIT_WORK(&lw->work, ofdpa_port_fdb_learn_work); + + lw->ofdpa_port = ofdpa_port; + lw->trans = trans; + lw->flags = flags; + ether_addr_copy(lw->addr, addr); + lw->vid = ofdpa_port_vlan_to_vid(ofdpa_port, vlan_id); + + if (switchdev_trans_ph_prepare(trans)) + ofdpa_kfree(trans, lw); + else + schedule_work(&lw->work); + + return 0; +} + +static struct ofdpa_fdb_tbl_entry * +ofdpa_fdb_tbl_find(const struct ofdpa *ofdpa, + const struct ofdpa_fdb_tbl_entry *match) +{ + struct ofdpa_fdb_tbl_entry *found; + + hash_for_each_possible(ofdpa->fdb_tbl, found, entry, match->key_crc32) + if (memcmp(&found->key, &match->key, sizeof(found->key)) == 0) + return found; + + return NULL; +} + +static int ofdpa_port_fdb(struct ofdpa_port *ofdpa_port, + struct switchdev_trans *trans, + const unsigned char *addr, + __be16 vlan_id, int flags) +{ + struct ofdpa *ofdpa = ofdpa_port->ofdpa; + struct ofdpa_fdb_tbl_entry *fdb; + struct ofdpa_fdb_tbl_entry *found; + bool removing = (flags & OFDPA_OP_FLAG_REMOVE); + unsigned long lock_flags; + + fdb = ofdpa_kzalloc(trans, flags, sizeof(*fdb)); + if (!fdb) + return -ENOMEM; + + fdb->learned = (flags & OFDPA_OP_FLAG_LEARNED); + fdb->touched = jiffies; + fdb->key.ofdpa_port = ofdpa_port; + ether_addr_copy(fdb->key.addr, addr); + fdb->key.vlan_id = vlan_id; + fdb->key_crc32 = crc32(~0, &fdb->key, sizeof(fdb->key)); + + spin_lock_irqsave(&ofdpa->fdb_tbl_lock, lock_flags); + + found = ofdpa_fdb_tbl_find(ofdpa, fdb); + + if (found) { + 
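+		/* Known entry: refresh its aging timestamp; on remove, free
+		 * the caller's scratch entry and unhash the existing one.
+		 */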
found->touched = jiffies; + if (removing) { + ofdpa_kfree(trans, fdb); + if (!switchdev_trans_ph_prepare(trans)) + hash_del(&found->entry); + } + } else if (!removing) { + if (!switchdev_trans_ph_prepare(trans)) + hash_add(ofdpa->fdb_tbl, &fdb->entry, + fdb->key_crc32); + } + + spin_unlock_irqrestore(&ofdpa->fdb_tbl_lock, lock_flags); + + /* Check if adding and already exists, or removing and can't find */ + if (!found != !removing) { + ofdpa_kfree(trans, fdb); + if (!found && removing) + return 0; + /* Refreshing existing to update aging timers */ + flags |= OFDPA_OP_FLAG_REFRESH; + } + + return ofdpa_port_fdb_learn(ofdpa_port, trans, flags, addr, vlan_id); +} + +static int ofdpa_port_fdb_flush(struct ofdpa_port *ofdpa_port, + struct switchdev_trans *trans, int flags) +{ + struct ofdpa *ofdpa = ofdpa_port->ofdpa; + struct ofdpa_fdb_tbl_entry *found; + unsigned long lock_flags; + struct hlist_node *tmp; + int bkt; + int err = 0; + + if (ofdpa_port->stp_state == BR_STATE_LEARNING || + ofdpa_port->stp_state == BR_STATE_FORWARDING) + return 0; + + flags |= OFDPA_OP_FLAG_NOWAIT | OFDPA_OP_FLAG_REMOVE; + + spin_lock_irqsave(&ofdpa->fdb_tbl_lock, lock_flags); + + hash_for_each_safe(ofdpa->fdb_tbl, bkt, tmp, found, entry) { + if (found->key.ofdpa_port != ofdpa_port) + continue; + if (!found->learned) + continue; + err = ofdpa_port_fdb_learn(ofdpa_port, trans, flags, + found->key.addr, + found->key.vlan_id); + if (err) + goto err_out; + if (!switchdev_trans_ph_prepare(trans)) + hash_del(&found->entry); + } + +err_out: + spin_unlock_irqrestore(&ofdpa->fdb_tbl_lock, lock_flags); + + return err; +} + +static void ofdpa_fdb_cleanup(unsigned long data) +{ + struct ofdpa *ofdpa = (struct ofdpa *)data; + struct ofdpa_port *ofdpa_port; + struct ofdpa_fdb_tbl_entry *entry; + struct hlist_node *tmp; + unsigned long next_timer = jiffies + BR_MIN_AGEING_TIME; + unsigned long expires; + unsigned long lock_flags; + int flags = OFDPA_OP_FLAG_NOWAIT | OFDPA_OP_FLAG_REMOVE | + OFDPA_OP_FLAG_LEARNED; + int bkt; + + spin_lock_irqsave(&ofdpa->fdb_tbl_lock, lock_flags); + + hash_for_each_safe(ofdpa->fdb_tbl, bkt, tmp, entry, entry) { + if (!entry->learned) + continue; + ofdpa_port = entry->key.ofdpa_port; + expires = entry->touched + ofdpa_port->ageing_time; + if (time_before_eq(expires, jiffies)) { + ofdpa_port_fdb_learn(ofdpa_port, NULL, + flags, entry->key.addr, + entry->key.vlan_id); + hash_del(&entry->entry); + } else if (time_before(expires, next_timer)) { + next_timer = expires; + } + } + + spin_unlock_irqrestore(&ofdpa->fdb_tbl_lock, lock_flags); + + mod_timer(&ofdpa->fdb_cleanup_timer, round_jiffies_up(next_timer)); +} + +static int ofdpa_port_router_mac(struct ofdpa_port *ofdpa_port, + struct switchdev_trans *trans, int flags, + __be16 vlan_id) +{ + u32 in_pport_mask = 0xffffffff; + __be16 eth_type; + const u8 *dst_mac_mask = ff_mac; + __be16 vlan_id_mask = htons(0xffff); + bool copy_to_cpu = false; + int err; + + if (ntohs(vlan_id) == 0) + vlan_id = ofdpa_port->internal_vlan_id; + + eth_type = htons(ETH_P_IP); + err = ofdpa_flow_tbl_term_mac(ofdpa_port, trans, + ofdpa_port->pport, in_pport_mask, + eth_type, ofdpa_port->dev->dev_addr, + dst_mac_mask, vlan_id, vlan_id_mask, + copy_to_cpu, flags); + if (err) + return err; + + eth_type = htons(ETH_P_IPV6); + err = ofdpa_flow_tbl_term_mac(ofdpa_port, trans, + ofdpa_port->pport, in_pport_mask, + eth_type, ofdpa_port->dev->dev_addr, + dst_mac_mask, vlan_id, vlan_id_mask, + copy_to_cpu, flags); + + return err; +} + +static int ofdpa_port_fwding(struct ofdpa_port 
*ofdpa_port, + struct switchdev_trans *trans, int flags) +{ + bool pop_vlan; + u32 out_pport; + __be16 vlan_id; + u16 vid; + int err; + + /* Port will be forwarding-enabled if its STP state is LEARNING + * or FORWARDING. Traffic from CPU can still egress, regardless of + * port STP state. Use L2 interface group on port VLANs as a way + * to toggle port forwarding: if forwarding is disabled, L2 + * interface group will not exist. + */ + + if (ofdpa_port->stp_state != BR_STATE_LEARNING && + ofdpa_port->stp_state != BR_STATE_FORWARDING) + flags |= OFDPA_OP_FLAG_REMOVE; + + out_pport = ofdpa_port->pport; + for (vid = 1; vid < VLAN_N_VID; vid++) { + if (!test_bit(vid, ofdpa_port->vlan_bitmap)) + continue; + vlan_id = htons(vid); + pop_vlan = ofdpa_vlan_id_is_internal(vlan_id); + err = ofdpa_group_l2_interface(ofdpa_port, trans, flags, + vlan_id, out_pport, pop_vlan); + if (err) { + netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 group for pport %d\n", + err, out_pport); + return err; + } + } + + return 0; +} + +static int ofdpa_port_stp_update(struct ofdpa_port *ofdpa_port, + struct switchdev_trans *trans, + int flags, u8 state) +{ + bool want[OFDPA_CTRL_MAX] = { 0, }; + bool prev_ctrls[OFDPA_CTRL_MAX]; + u8 uninitialized_var(prev_state); + int err; + int i; + + if (switchdev_trans_ph_prepare(trans)) { + memcpy(prev_ctrls, ofdpa_port->ctrls, sizeof(prev_ctrls)); + prev_state = ofdpa_port->stp_state; + } + + if (ofdpa_port->stp_state == state) + return 0; + + ofdpa_port->stp_state = state; + + switch (state) { + case BR_STATE_DISABLED: + /* port is completely disabled */ + break; + case BR_STATE_LISTENING: + case BR_STATE_BLOCKING: + want[OFDPA_CTRL_LINK_LOCAL_MCAST] = true; + break; + case BR_STATE_LEARNING: + case BR_STATE_FORWARDING: + if (!ofdpa_port_is_ovsed(ofdpa_port)) + want[OFDPA_CTRL_LINK_LOCAL_MCAST] = true; + want[OFDPA_CTRL_IPV4_MCAST] = true; + want[OFDPA_CTRL_IPV6_MCAST] = true; + if (ofdpa_port_is_bridged(ofdpa_port)) + want[OFDPA_CTRL_DFLT_BRIDGING] = true; + else if (ofdpa_port_is_ovsed(ofdpa_port)) + want[OFDPA_CTRL_DFLT_OVS] = true; + else + want[OFDPA_CTRL_LOCAL_ARP] = true; + break; + } + + for (i = 0; i < OFDPA_CTRL_MAX; i++) { + if (want[i] != ofdpa_port->ctrls[i]) { + int ctrl_flags = flags | + (want[i] ? 
0 : OFDPA_OP_FLAG_REMOVE); + err = ofdpa_port_ctrl(ofdpa_port, trans, ctrl_flags, + &ofdpa_ctrls[i]); + if (err) + goto err_out; + ofdpa_port->ctrls[i] = want[i]; + } + } + + err = ofdpa_port_fdb_flush(ofdpa_port, trans, flags); + if (err) + goto err_out; + + err = ofdpa_port_fwding(ofdpa_port, trans, flags); + +err_out: + if (switchdev_trans_ph_prepare(trans)) { + memcpy(ofdpa_port->ctrls, prev_ctrls, sizeof(prev_ctrls)); + ofdpa_port->stp_state = prev_state; + } + + return err; +} + +static int ofdpa_port_fwd_enable(struct ofdpa_port *ofdpa_port, int flags) +{ + if (ofdpa_port_is_bridged(ofdpa_port)) + /* bridge STP will enable port */ + return 0; + + /* port is not bridged, so simulate going to FORWARDING state */ + return ofdpa_port_stp_update(ofdpa_port, NULL, flags, + BR_STATE_FORWARDING); +} + +static int ofdpa_port_fwd_disable(struct ofdpa_port *ofdpa_port, int flags) +{ + if (ofdpa_port_is_bridged(ofdpa_port)) + /* bridge STP will disable port */ + return 0; + + /* port is not bridged, so simulate going to DISABLED state */ + return ofdpa_port_stp_update(ofdpa_port, NULL, flags, + BR_STATE_DISABLED); +} + +static int ofdpa_port_vlan_add(struct ofdpa_port *ofdpa_port, + struct switchdev_trans *trans, + u16 vid, u16 flags) +{ + int err; + + /* XXX deal with flags for PVID and untagged */ + + err = ofdpa_port_vlan(ofdpa_port, trans, 0, vid); + if (err) + return err; + + err = ofdpa_port_router_mac(ofdpa_port, trans, 0, htons(vid)); + if (err) + ofdpa_port_vlan(ofdpa_port, trans, + OFDPA_OP_FLAG_REMOVE, vid); + + return err; +} + +static int ofdpa_port_vlan_del(struct ofdpa_port *ofdpa_port, + u16 vid, u16 flags) +{ + int err; + + err = ofdpa_port_router_mac(ofdpa_port, NULL, + OFDPA_OP_FLAG_REMOVE, htons(vid)); + if (err) + return err; + + return ofdpa_port_vlan(ofdpa_port, NULL, + OFDPA_OP_FLAG_REMOVE, vid); +} + +static struct ofdpa_internal_vlan_tbl_entry * +ofdpa_internal_vlan_tbl_find(const struct ofdpa *ofdpa, int ifindex) +{ + struct ofdpa_internal_vlan_tbl_entry *found; + + hash_for_each_possible(ofdpa->internal_vlan_tbl, found, + entry, ifindex) { + if (found->ifindex == ifindex) + return found; + } + + return NULL; +} + +static __be16 ofdpa_port_internal_vlan_id_get(struct ofdpa_port *ofdpa_port, + int ifindex) +{ + struct ofdpa *ofdpa = ofdpa_port->ofdpa; + struct ofdpa_internal_vlan_tbl_entry *entry; + struct ofdpa_internal_vlan_tbl_entry *found; + unsigned long lock_flags; + int i; + + entry = kzalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) + return 0; + + entry->ifindex = ifindex; + + spin_lock_irqsave(&ofdpa->internal_vlan_tbl_lock, lock_flags); + + found = ofdpa_internal_vlan_tbl_find(ofdpa, ifindex); + if (found) { + kfree(entry); + goto found; + } + + found = entry; + hash_add(ofdpa->internal_vlan_tbl, &found->entry, found->ifindex); + + for (i = 0; i < OFDPA_N_INTERNAL_VLANS; i++) { + if (test_and_set_bit(i, ofdpa->internal_vlan_bitmap)) + continue; + found->vlan_id = htons(OFDPA_INTERNAL_VLAN_ID_BASE + i); + goto found; + } + + netdev_err(ofdpa_port->dev, "Out of internal VLAN IDs\n"); + +found: + found->ref_count++; + spin_unlock_irqrestore(&ofdpa->internal_vlan_tbl_lock, lock_flags); + + return found->vlan_id; +} + +static int ofdpa_port_fib_ipv4(struct ofdpa_port *ofdpa_port, + struct switchdev_trans *trans, __be32 dst, + int dst_len, const struct fib_info *fi, + u32 tb_id, int flags) +{ + const struct fib_nh *nh; + __be16 eth_type = htons(ETH_P_IP); + __be32 dst_mask = inet_make_mask(dst_len); + __be16 internal_vlan_id = ofdpa_port->internal_vlan_id; + u32 
priority = fi->fib_priority; + enum rocker_of_dpa_table_id goto_tbl = + ROCKER_OF_DPA_TABLE_ID_ACL_POLICY; + u32 group_id; + bool nh_on_port; + bool has_gw; + u32 index; + int err; + + /* XXX support ECMP */ + + nh = fi->fib_nh; + nh_on_port = (fi->fib_dev == ofdpa_port->dev); + has_gw = !!nh->nh_gw; + + if (has_gw && nh_on_port) { + err = ofdpa_port_ipv4_nh(ofdpa_port, trans, flags, + nh->nh_gw, &index); + if (err) + return err; + + group_id = ROCKER_GROUP_L3_UNICAST(index); + } else { + /* Send to CPU for processing */ + group_id = ROCKER_GROUP_L2_INTERFACE(internal_vlan_id, 0); + } + + err = ofdpa_flow_tbl_ucast4_routing(ofdpa_port, trans, eth_type, dst, + dst_mask, priority, goto_tbl, + group_id, flags); + if (err) + netdev_err(ofdpa_port->dev, "Error (%d) IPv4 route %pI4\n", + err, &dst); + + return err; +} + +static void +ofdpa_port_internal_vlan_id_put(const struct ofdpa_port *ofdpa_port, + int ifindex) +{ + struct ofdpa *ofdpa = ofdpa_port->ofdpa; + struct ofdpa_internal_vlan_tbl_entry *found; + unsigned long lock_flags; + unsigned long bit; + + spin_lock_irqsave(&ofdpa->internal_vlan_tbl_lock, lock_flags); + + found = ofdpa_internal_vlan_tbl_find(ofdpa, ifindex); + if (!found) { + netdev_err(ofdpa_port->dev, + "ifindex (%d) not found in internal VLAN tbl\n", + ifindex); + goto not_found; + } + + if (--found->ref_count <= 0) { + bit = ntohs(found->vlan_id) - OFDPA_INTERNAL_VLAN_ID_BASE; + clear_bit(bit, ofdpa->internal_vlan_bitmap); + hash_del(&found->entry); + kfree(found); + } + +not_found: + spin_unlock_irqrestore(&ofdpa->internal_vlan_tbl_lock, lock_flags); +} + +/********************************** + * Rocker world ops implementation + **********************************/ + +static int ofdpa_init(struct rocker *rocker) +{ + struct ofdpa *ofdpa = rocker->wpriv; + + ofdpa->rocker = rocker; + + hash_init(ofdpa->flow_tbl); + spin_lock_init(&ofdpa->flow_tbl_lock); + + hash_init(ofdpa->group_tbl); + spin_lock_init(&ofdpa->group_tbl_lock); + + hash_init(ofdpa->fdb_tbl); + spin_lock_init(&ofdpa->fdb_tbl_lock); + + hash_init(ofdpa->internal_vlan_tbl); + spin_lock_init(&ofdpa->internal_vlan_tbl_lock); + + hash_init(ofdpa->neigh_tbl); + spin_lock_init(&ofdpa->neigh_tbl_lock); + + setup_timer(&ofdpa->fdb_cleanup_timer, ofdpa_fdb_cleanup, + (unsigned long) ofdpa); + mod_timer(&ofdpa->fdb_cleanup_timer, jiffies); + + return 0; +} + +static void ofdpa_fini(struct rocker *rocker) +{ + struct ofdpa *ofdpa = rocker->wpriv; + + unsigned long flags; + struct ofdpa_flow_tbl_entry *flow_entry; + struct ofdpa_group_tbl_entry *group_entry; + struct ofdpa_fdb_tbl_entry *fdb_entry; + struct ofdpa_internal_vlan_tbl_entry *internal_vlan_entry; + struct ofdpa_neigh_tbl_entry *neigh_entry; + struct hlist_node *tmp; + int bkt; + + del_timer_sync(&ofdpa->fdb_cleanup_timer); + + spin_lock_irqsave(&ofdpa->flow_tbl_lock, flags); + hash_for_each_safe(ofdpa->flow_tbl, bkt, tmp, flow_entry, entry) + hash_del(&flow_entry->entry); + spin_unlock_irqrestore(&ofdpa->flow_tbl_lock, flags); + + spin_lock_irqsave(&ofdpa->group_tbl_lock, flags); + hash_for_each_safe(ofdpa->group_tbl, bkt, tmp, group_entry, entry) + hash_del(&group_entry->entry); + spin_unlock_irqrestore(&ofdpa->group_tbl_lock, flags); + + spin_lock_irqsave(&ofdpa->fdb_tbl_lock, flags); + hash_for_each_safe(ofdpa->fdb_tbl, bkt, tmp, fdb_entry, entry) + hash_del(&fdb_entry->entry); + spin_unlock_irqrestore(&ofdpa->fdb_tbl_lock, flags); + + spin_lock_irqsave(&ofdpa->internal_vlan_tbl_lock, flags); + hash_for_each_safe(ofdpa->internal_vlan_tbl, bkt, + tmp, 
internal_vlan_entry, entry) + hash_del(&internal_vlan_entry->entry); + spin_unlock_irqrestore(&ofdpa->internal_vlan_tbl_lock, flags); + + spin_lock_irqsave(&ofdpa->neigh_tbl_lock, flags); + hash_for_each_safe(ofdpa->neigh_tbl, bkt, tmp, neigh_entry, entry) + hash_del(&neigh_entry->entry); + spin_unlock_irqrestore(&ofdpa->neigh_tbl_lock, flags); +} + +static int ofdpa_port_pre_init(struct rocker_port *rocker_port) +{ + struct ofdpa_port *ofdpa_port = rocker_port->wpriv; + + ofdpa_port->ofdpa = rocker_port->rocker->wpriv; + ofdpa_port->rocker_port = rocker_port; + ofdpa_port->dev = rocker_port->dev; + ofdpa_port->pport = rocker_port->pport; + ofdpa_port->brport_flags = BR_LEARNING | BR_LEARNING_SYNC; + ofdpa_port->ageing_time = BR_DEFAULT_AGEING_TIME; + return 0; +} + +static int ofdpa_port_init(struct rocker_port *rocker_port) +{ + struct ofdpa_port *ofdpa_port = rocker_port->wpriv; + int err; + + switchdev_port_fwd_mark_set(ofdpa_port->dev, NULL, false); + rocker_port_set_learning(rocker_port, + !!(ofdpa_port->brport_flags & BR_LEARNING)); + + err = ofdpa_port_ig_tbl(ofdpa_port, NULL, 0); + if (err) { + netdev_err(ofdpa_port->dev, "install ig port table failed\n"); + return err; + } + + ofdpa_port->internal_vlan_id = + ofdpa_port_internal_vlan_id_get(ofdpa_port, + ofdpa_port->dev->ifindex); + + err = ofdpa_port_vlan_add(ofdpa_port, NULL, OFDPA_UNTAGGED_VID, 0); + if (err) { + netdev_err(ofdpa_port->dev, "install untagged VLAN failed\n"); + goto err_untagged_vlan; + } + return 0; + +err_untagged_vlan: + ofdpa_port_ig_tbl(ofdpa_port, NULL, OFDPA_OP_FLAG_REMOVE); + return err; +} + +static void ofdpa_port_fini(struct rocker_port *rocker_port) +{ + struct ofdpa_port *ofdpa_port = rocker_port->wpriv; + + ofdpa_port_ig_tbl(ofdpa_port, NULL, OFDPA_OP_FLAG_REMOVE); +} + +static int ofdpa_port_open(struct rocker_port *rocker_port) +{ + struct ofdpa_port *ofdpa_port = rocker_port->wpriv; + + return ofdpa_port_fwd_enable(ofdpa_port, 0); +} + +static void ofdpa_port_stop(struct rocker_port *rocker_port) +{ + struct ofdpa_port *ofdpa_port = rocker_port->wpriv; + + ofdpa_port_fwd_disable(ofdpa_port, OFDPA_OP_FLAG_NOWAIT); +} + +static int ofdpa_port_attr_stp_state_set(struct rocker_port *rocker_port, + u8 state, + struct switchdev_trans *trans) +{ + struct ofdpa_port *ofdpa_port = rocker_port->wpriv; + + return ofdpa_port_stp_update(ofdpa_port, trans, 0, state); +} + +static int ofdpa_port_attr_bridge_flags_set(struct rocker_port *rocker_port, + unsigned long brport_flags, + struct switchdev_trans *trans) +{ + struct ofdpa_port *ofdpa_port = rocker_port->wpriv; + unsigned long orig_flags; + int err = 0; + + orig_flags = ofdpa_port->brport_flags; + ofdpa_port->brport_flags = brport_flags; + if ((orig_flags ^ ofdpa_port->brport_flags) & BR_LEARNING && + !switchdev_trans_ph_prepare(trans)) + err = rocker_port_set_learning(ofdpa_port->rocker_port, + !!(ofdpa_port->brport_flags & BR_LEARNING)); + + if (switchdev_trans_ph_prepare(trans)) + ofdpa_port->brport_flags = orig_flags; + + return err; +} + +static int +ofdpa_port_attr_bridge_flags_get(const struct rocker_port *rocker_port, + unsigned long *p_brport_flags) +{ + const struct ofdpa_port *ofdpa_port = rocker_port->wpriv; + + *p_brport_flags = ofdpa_port->brport_flags; + return 0; +} + +static int +ofdpa_port_attr_bridge_ageing_time_set(struct rocker_port *rocker_port, + u32 ageing_time, + struct switchdev_trans *trans) +{ + struct ofdpa_port *ofdpa_port = rocker_port->wpriv; + + if (!switchdev_trans_ph_prepare(trans)) { + ofdpa_port->ageing_time = 
clock_t_to_jiffies(ageing_time); + mod_timer(&ofdpa_port->ofdpa->fdb_cleanup_timer, jiffies); + } + + return 0; +} + +static int ofdpa_port_obj_vlan_add(struct rocker_port *rocker_port, + const struct switchdev_obj_port_vlan *vlan, + struct switchdev_trans *trans) +{ + struct ofdpa_port *ofdpa_port = rocker_port->wpriv; + u16 vid; + int err; + + for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) { + err = ofdpa_port_vlan_add(ofdpa_port, trans, vid, vlan->flags); + if (err) + return err; + } + + return 0; +} + +static int ofdpa_port_obj_vlan_del(struct rocker_port *rocker_port, + const struct switchdev_obj_port_vlan *vlan) +{ + struct ofdpa_port *ofdpa_port = rocker_port->wpriv; + u16 vid; + int err; + + for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) { + err = ofdpa_port_vlan_del(ofdpa_port, vid, vlan->flags); + if (err) + return err; + } + + return 0; +} + +static int ofdpa_port_obj_vlan_dump(const struct rocker_port *rocker_port, + struct switchdev_obj_port_vlan *vlan, + switchdev_obj_dump_cb_t *cb) +{ + const struct ofdpa_port *ofdpa_port = rocker_port->wpriv; + u16 vid; + int err = 0; + + for (vid = 1; vid < VLAN_N_VID; vid++) { + if (!test_bit(vid, ofdpa_port->vlan_bitmap)) + continue; + vlan->flags = 0; + if (ofdpa_vlan_id_is_internal(htons(vid))) + vlan->flags |= BRIDGE_VLAN_INFO_PVID; + vlan->vid_begin = vlan->vid_end = vid; + err = cb(&vlan->obj); + if (err) + break; + } + + return err; +} + +static int ofdpa_port_obj_fib4_add(struct rocker_port *rocker_port, + const struct switchdev_obj_ipv4_fib *fib4, + struct switchdev_trans *trans) +{ + struct ofdpa_port *ofdpa_port = rocker_port->wpriv; + + return ofdpa_port_fib_ipv4(ofdpa_port, trans, + htonl(fib4->dst), fib4->dst_len, + &fib4->fi, fib4->tb_id, 0); +} + +static int ofdpa_port_obj_fib4_del(struct rocker_port *rocker_port, + const struct switchdev_obj_ipv4_fib *fib4) +{ + struct ofdpa_port *ofdpa_port = rocker_port->wpriv; + + return ofdpa_port_fib_ipv4(ofdpa_port, NULL, + htonl(fib4->dst), fib4->dst_len, + &fib4->fi, fib4->tb_id, + OFDPA_OP_FLAG_REMOVE); +} + +static int ofdpa_port_obj_fdb_add(struct rocker_port *rocker_port, + const struct switchdev_obj_port_fdb *fdb, + struct switchdev_trans *trans) +{ + struct ofdpa_port *ofdpa_port = rocker_port->wpriv; + __be16 vlan_id = ofdpa_port_vid_to_vlan(ofdpa_port, fdb->vid, NULL); + + if (!ofdpa_port_is_bridged(ofdpa_port)) + return -EINVAL; + + return ofdpa_port_fdb(ofdpa_port, trans, fdb->addr, vlan_id, 0); +} + +static int ofdpa_port_obj_fdb_del(struct rocker_port *rocker_port, + const struct switchdev_obj_port_fdb *fdb) +{ + struct ofdpa_port *ofdpa_port = rocker_port->wpriv; + __be16 vlan_id = ofdpa_port_vid_to_vlan(ofdpa_port, fdb->vid, NULL); + int flags = OFDPA_OP_FLAG_REMOVE; + + if (!ofdpa_port_is_bridged(ofdpa_port)) + return -EINVAL; + + return ofdpa_port_fdb(ofdpa_port, NULL, fdb->addr, vlan_id, flags); +} + +static int ofdpa_port_obj_fdb_dump(const struct rocker_port *rocker_port, + struct switchdev_obj_port_fdb *fdb, + switchdev_obj_dump_cb_t *cb) +{ + const struct ofdpa_port *ofdpa_port = rocker_port->wpriv; + struct ofdpa *ofdpa = ofdpa_port->ofdpa; + struct ofdpa_fdb_tbl_entry *found; + struct hlist_node *tmp; + unsigned long lock_flags; + int bkt; + int err = 0; + + spin_lock_irqsave(&ofdpa->fdb_tbl_lock, lock_flags); + hash_for_each_safe(ofdpa->fdb_tbl, bkt, tmp, found, entry) { + if (found->key.ofdpa_port != ofdpa_port) + continue; + ether_addr_copy(fdb->addr, found->key.addr); + fdb->ndm_state = NUD_REACHABLE; + fdb->vid = 
ofdpa_port_vlan_to_vid(ofdpa_port, + found->key.vlan_id); + err = cb(&fdb->obj); + if (err) + break; + } + spin_unlock_irqrestore(&ofdpa->fdb_tbl_lock, lock_flags); + + return err; +} + +static int ofdpa_port_bridge_join(struct ofdpa_port *ofdpa_port, + struct net_device *bridge) +{ + int err; + + /* Port is joining bridge, so the internal VLAN for the + * port is going to change to the bridge internal VLAN. + * Let's remove untagged VLAN (vid=0) from port and + * re-add once internal VLAN has changed. + */ + + err = ofdpa_port_vlan_del(ofdpa_port, OFDPA_UNTAGGED_VID, 0); + if (err) + return err; + + ofdpa_port_internal_vlan_id_put(ofdpa_port, + ofdpa_port->dev->ifindex); + ofdpa_port->internal_vlan_id = + ofdpa_port_internal_vlan_id_get(ofdpa_port, bridge->ifindex); + + ofdpa_port->bridge_dev = bridge; + switchdev_port_fwd_mark_set(ofdpa_port->dev, bridge, true); + + return ofdpa_port_vlan_add(ofdpa_port, NULL, OFDPA_UNTAGGED_VID, 0); +} + +static int ofdpa_port_bridge_leave(struct ofdpa_port *ofdpa_port) +{ + int err; + + err = ofdpa_port_vlan_del(ofdpa_port, OFDPA_UNTAGGED_VID, 0); + if (err) + return err; + + ofdpa_port_internal_vlan_id_put(ofdpa_port, + ofdpa_port->bridge_dev->ifindex); + ofdpa_port->internal_vlan_id = + ofdpa_port_internal_vlan_id_get(ofdpa_port, + ofdpa_port->dev->ifindex); + + switchdev_port_fwd_mark_set(ofdpa_port->dev, ofdpa_port->bridge_dev, + false); + ofdpa_port->bridge_dev = NULL; + + err = ofdpa_port_vlan_add(ofdpa_port, NULL, OFDPA_UNTAGGED_VID, 0); + if (err) + return err; + + if (ofdpa_port->dev->flags & IFF_UP) + err = ofdpa_port_fwd_enable(ofdpa_port, 0); + + return err; +} + +static int ofdpa_port_ovs_changed(struct ofdpa_port *ofdpa_port, + struct net_device *master) +{ + int err; + + ofdpa_port->bridge_dev = master; + + err = ofdpa_port_fwd_disable(ofdpa_port, 0); + if (err) + return err; + err = ofdpa_port_fwd_enable(ofdpa_port, 0); + + return err; +} + +static int ofdpa_port_master_linked(struct rocker_port *rocker_port, + struct net_device *master) +{ + struct ofdpa_port *ofdpa_port = rocker_port->wpriv; + int err = 0; + + if (netif_is_bridge_master(master)) + err = ofdpa_port_bridge_join(ofdpa_port, master); + else if (netif_is_ovs_master(master)) + err = ofdpa_port_ovs_changed(ofdpa_port, master); + return err; +} + +static int ofdpa_port_master_unlinked(struct rocker_port *rocker_port, + struct net_device *master) +{ + struct ofdpa_port *ofdpa_port = rocker_port->wpriv; + int err = 0; + + if (ofdpa_port_is_bridged(ofdpa_port)) + err = ofdpa_port_bridge_leave(ofdpa_port); + else if (ofdpa_port_is_ovsed(ofdpa_port)) + err = ofdpa_port_ovs_changed(ofdpa_port, NULL); + return err; +} + +static int ofdpa_port_neigh_update(struct rocker_port *rocker_port, + struct neighbour *n) +{ + struct ofdpa_port *ofdpa_port = rocker_port->wpriv; + int flags = (n->nud_state & NUD_VALID ? 
0 : OFDPA_OP_FLAG_REMOVE) | + OFDPA_OP_FLAG_NOWAIT; + __be32 ip_addr = *(__be32 *) n->primary_key; + + return ofdpa_port_ipv4_neigh(ofdpa_port, NULL, flags, ip_addr, n->ha); +} + +static int ofdpa_port_neigh_destroy(struct rocker_port *rocker_port, + struct neighbour *n) +{ + struct ofdpa_port *ofdpa_port = rocker_port->wpriv; + int flags = OFDPA_OP_FLAG_REMOVE | OFDPA_OP_FLAG_NOWAIT; + __be32 ip_addr = *(__be32 *) n->primary_key; + + return ofdpa_port_ipv4_neigh(ofdpa_port, NULL, flags, ip_addr, n->ha); +} + +static int ofdpa_port_ev_mac_vlan_seen(struct rocker_port *rocker_port, + const unsigned char *addr, + __be16 vlan_id) +{ + struct ofdpa_port *ofdpa_port = rocker_port->wpriv; + int flags = OFDPA_OP_FLAG_NOWAIT | OFDPA_OP_FLAG_LEARNED; + + if (ofdpa_port->stp_state != BR_STATE_LEARNING && + ofdpa_port->stp_state != BR_STATE_FORWARDING) + return 0; + + return ofdpa_port_fdb(ofdpa_port, NULL, addr, vlan_id, flags); +} + +struct rocker_world_ops rocker_ofdpa_ops = { + .kind = "ofdpa", + .priv_size = sizeof(struct ofdpa), + .port_priv_size = sizeof(struct ofdpa_port), + .mode = ROCKER_PORT_MODE_OF_DPA, + .init = ofdpa_init, + .fini = ofdpa_fini, + .port_pre_init = ofdpa_port_pre_init, + .port_init = ofdpa_port_init, + .port_fini = ofdpa_port_fini, + .port_open = ofdpa_port_open, + .port_stop = ofdpa_port_stop, + .port_attr_stp_state_set = ofdpa_port_attr_stp_state_set, + .port_attr_bridge_flags_set = ofdpa_port_attr_bridge_flags_set, + .port_attr_bridge_flags_get = ofdpa_port_attr_bridge_flags_get, + .port_attr_bridge_ageing_time_set = ofdpa_port_attr_bridge_ageing_time_set, + .port_obj_vlan_add = ofdpa_port_obj_vlan_add, + .port_obj_vlan_del = ofdpa_port_obj_vlan_del, + .port_obj_vlan_dump = ofdpa_port_obj_vlan_dump, + .port_obj_fib4_add = ofdpa_port_obj_fib4_add, + .port_obj_fib4_del = ofdpa_port_obj_fib4_del, + .port_obj_fdb_add = ofdpa_port_obj_fdb_add, + .port_obj_fdb_del = ofdpa_port_obj_fdb_del, + .port_obj_fdb_dump = ofdpa_port_obj_fdb_dump, + .port_master_linked = ofdpa_port_master_linked, + .port_master_unlinked = ofdpa_port_master_unlinked, + .port_neigh_update = ofdpa_port_neigh_update, + .port_neigh_destroy = ofdpa_port_neigh_destroy, + .port_ev_mac_vlan_seen = ofdpa_port_ev_mac_vlan_seen, +}; diff --git a/drivers/net/ethernet/rocker/rocker_tlv.c b/drivers/net/ethernet/rocker/rocker_tlv.c new file mode 100644 index 000000000000..8185118f3492 --- /dev/null +++ b/drivers/net/ethernet/rocker/rocker_tlv.c @@ -0,0 +1,53 @@ +/* + * drivers/net/ethernet/rocker/rocker_tlv.c - Rocker switch device driver + * Copyright (c) 2014-2016 Jiri Pirko <jiri@mellanox.com> + * Copyright (c) 2014 Scott Feldman <sfeldma@gmail.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
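rocker_world_ops is a plain table of function pointers: the core rocker driver dispatches every init, port, switchdev and neighbour operation through it, so an alternate world could be plugged in simply by registering a different table. A toy sketch of that dispatch pattern (hypothetical names, not the rocker API):

#include <stdio.h>

/* The core only ever sees this table, never the backend internals. */
struct world_ops {
	const char *kind;
	int (*init)(void *priv);
	int (*port_open)(void *priv);
};

static int demo_init(void *priv)      { puts("world init"); return 0; }
static int demo_port_open(void *priv) { puts("port open");  return 0; }

static const struct world_ops demo_world_ops = {
	.kind      = "demo",
	.init      = demo_init,
	.port_open = demo_port_open,
};

/* Generic core path: no knowledge of which world is plugged in. */
static int core_bring_up(const struct world_ops *ops, void *priv)
{
	int err = ops->init(priv);

	return err ? err : ops->port_open(priv);
}

int main(void)
{
	return core_bring_up(&demo_world_ops, NULL);
}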
+ */ + +#include <linux/types.h> +#include <linux/string.h> +#include <linux/errno.h> + +#include "rocker_hw.h" +#include "rocker_tlv.h" + +void rocker_tlv_parse(const struct rocker_tlv **tb, int maxtype, + const char *buf, int buf_len) +{ + const struct rocker_tlv *tlv; + const struct rocker_tlv *head = (const struct rocker_tlv *) buf; + int rem; + + memset(tb, 0, sizeof(struct rocker_tlv *) * (maxtype + 1)); + + rocker_tlv_for_each(tlv, head, buf_len, rem) { + u32 type = rocker_tlv_type(tlv); + + if (type > 0 && type <= maxtype) + tb[type] = tlv; + } +} + +int rocker_tlv_put(struct rocker_desc_info *desc_info, + int attrtype, int attrlen, const void *data) +{ + int tail_room = desc_info->data_size - desc_info->tlv_size; + int total_size = rocker_tlv_total_size(attrlen); + struct rocker_tlv *tlv; + + if (unlikely(tail_room < total_size)) + return -EMSGSIZE; + + tlv = rocker_tlv_start(desc_info); + desc_info->tlv_size += total_size; + tlv->type = attrtype; + tlv->len = rocker_tlv_attr_size(attrlen); + memcpy(rocker_tlv_data(tlv), data, attrlen); + memset((char *) tlv + tlv->len, 0, rocker_tlv_padlen(attrlen)); + return 0; +} diff --git a/drivers/net/ethernet/rocker/rocker_tlv.h b/drivers/net/ethernet/rocker/rocker_tlv.h new file mode 100644 index 000000000000..a63ef82e7c72 --- /dev/null +++ b/drivers/net/ethernet/rocker/rocker_tlv.h @@ -0,0 +1,201 @@ +/* + * drivers/net/ethernet/rocker/rocker_tlv.h - Rocker switch device driver + * Copyright (c) 2014-2016 Jiri Pirko <jiri@mellanox.com> + * Copyright (c) 2014 Scott Feldman <sfeldma@gmail.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
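rocker_tlv_put() appends an 8-byte-aligned header plus payload and rocker_tlv_parse() indexes attributes by type, exactly as the layout diagram in rocker_tlv.h describes. A self-contained round trip that mirrors that layout and alignment rule; this mock is not the driver code, and it assumes the header is a 32-bit type plus a 16-bit len (len covering header and payload but not the trailing pad), as the helpers imply:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define ALIGNTO 8u
#define ALIGN(len) (((len) + ALIGNTO - 1) & ~(ALIGNTO - 1))

struct tlv { uint32_t type; uint16_t len; };	/* len: header + payload */
#define HDRLEN ((int)ALIGN(sizeof(struct tlv)))

static int tlv_put(char *buf, int *off, int size, uint32_t type,
		   const void *data, int dlen)
{
	int total = (int)ALIGN(HDRLEN + dlen);
	struct tlv *t = (struct tlv *)(buf + *off);

	if (size - *off < total)
		return -1;			/* no tail room, like -EMSGSIZE */
	t->type = type;
	t->len = (uint16_t)(HDRLEN + dlen);
	memcpy(buf + *off + HDRLEN, data, dlen);
	memset(buf + *off + t->len, 0, total - t->len);	/* zero the pad */
	*off += total;
	return 0;
}

static void tlv_parse(const struct tlv **tb, int maxtype,
		      const char *buf, int len)
{
	const struct tlv *t = (const struct tlv *)buf;
	int rem = len;

	memset(tb, 0, sizeof(*tb) * (maxtype + 1));
	while (rem >= HDRLEN && t->len >= HDRLEN && t->len <= rem) {
		if (t->type > 0 && (int)t->type <= maxtype)
			tb[t->type] = t;
		rem -= (int)ALIGN(t->len);
		t = (const struct tlv *)((const char *)t + ALIGN(t->len));
	}
}

int main(void)
{
	const struct tlv *tb[3];
	char buf[64];
	int off = 0;
	uint32_t v = 42;

	tlv_put(buf, &off, sizeof(buf), 2, &v, sizeof(v));
	tlv_parse(tb, 2, buf, off);
	if (tb[2])
		printf("attr 2 = %u\n",
		       *(const uint32_t *)((const char *)tb[2] + HDRLEN));
	return 0;
}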
+ */ + +#ifndef _ROCKER_TLV_H +#define _ROCKER_TLV_H + +#include <linux/types.h> + +#include "rocker_hw.h" +#include "rocker.h" + +#define ROCKER_TLV_ALIGNTO 8U +#define ROCKER_TLV_ALIGN(len) \ + (((len) + ROCKER_TLV_ALIGNTO - 1) & ~(ROCKER_TLV_ALIGNTO - 1)) +#define ROCKER_TLV_HDRLEN ROCKER_TLV_ALIGN(sizeof(struct rocker_tlv)) + +/* <------- ROCKER_TLV_HDRLEN -------> <--- ROCKER_TLV_ALIGN(payload) ---> + * +-----------------------------+- - -+- - - - - - - - - - - - - - -+- - -+ + * | Header | Pad | Payload | Pad | + * | (struct rocker_tlv) | ing | | ing | + * +-----------------------------+- - -+- - - - - - - - - - - - - - -+- - -+ + * <--------------------------- tlv->len --------------------------> + */ + +static inline struct rocker_tlv *rocker_tlv_next(const struct rocker_tlv *tlv, + int *remaining) +{ + int totlen = ROCKER_TLV_ALIGN(tlv->len); + + *remaining -= totlen; + return (struct rocker_tlv *) ((char *) tlv + totlen); +} + +static inline int rocker_tlv_ok(const struct rocker_tlv *tlv, int remaining) +{ + return remaining >= (int) ROCKER_TLV_HDRLEN && + tlv->len >= ROCKER_TLV_HDRLEN && + tlv->len <= remaining; +} + +#define rocker_tlv_for_each(pos, head, len, rem) \ + for (pos = head, rem = len; \ + rocker_tlv_ok(pos, rem); \ + pos = rocker_tlv_next(pos, &(rem))) + +#define rocker_tlv_for_each_nested(pos, tlv, rem) \ + rocker_tlv_for_each(pos, rocker_tlv_data(tlv), \ + rocker_tlv_len(tlv), rem) + +static inline int rocker_tlv_attr_size(int payload) +{ + return ROCKER_TLV_HDRLEN + payload; +} + +static inline int rocker_tlv_total_size(int payload) +{ + return ROCKER_TLV_ALIGN(rocker_tlv_attr_size(payload)); +} + +static inline int rocker_tlv_padlen(int payload) +{ + return rocker_tlv_total_size(payload) - rocker_tlv_attr_size(payload); +} + +static inline int rocker_tlv_type(const struct rocker_tlv *tlv) +{ + return tlv->type; +} + +static inline void *rocker_tlv_data(const struct rocker_tlv *tlv) +{ + return (char *) tlv + ROCKER_TLV_HDRLEN; +} + +static inline int rocker_tlv_len(const struct rocker_tlv *tlv) +{ + return tlv->len - ROCKER_TLV_HDRLEN; +} + +static inline u8 rocker_tlv_get_u8(const struct rocker_tlv *tlv) +{ + return *(u8 *) rocker_tlv_data(tlv); +} + +static inline u16 rocker_tlv_get_u16(const struct rocker_tlv *tlv) +{ + return *(u16 *) rocker_tlv_data(tlv); +} + +static inline __be16 rocker_tlv_get_be16(const struct rocker_tlv *tlv) +{ + return *(__be16 *) rocker_tlv_data(tlv); +} + +static inline u32 rocker_tlv_get_u32(const struct rocker_tlv *tlv) +{ + return *(u32 *) rocker_tlv_data(tlv); +} + +static inline u64 rocker_tlv_get_u64(const struct rocker_tlv *tlv) +{ + return *(u64 *) rocker_tlv_data(tlv); +} + +void rocker_tlv_parse(const struct rocker_tlv **tb, int maxtype, + const char *buf, int buf_len); + +static inline void rocker_tlv_parse_nested(const struct rocker_tlv **tb, + int maxtype, + const struct rocker_tlv *tlv) +{ + rocker_tlv_parse(tb, maxtype, rocker_tlv_data(tlv), + rocker_tlv_len(tlv)); +} + +static inline void +rocker_tlv_parse_desc(const struct rocker_tlv **tb, int maxtype, + const struct rocker_desc_info *desc_info) +{ + rocker_tlv_parse(tb, maxtype, desc_info->data, + desc_info->desc->tlv_size); +} + +static inline struct rocker_tlv * +rocker_tlv_start(struct rocker_desc_info *desc_info) +{ + return (struct rocker_tlv *) ((char *) desc_info->data + + desc_info->tlv_size); +} + +int rocker_tlv_put(struct rocker_desc_info *desc_info, + int attrtype, int attrlen, const void *data); + +static inline int rocker_tlv_put_u8(struct 
rocker_desc_info *desc_info, + int attrtype, u8 value) +{ + return rocker_tlv_put(desc_info, attrtype, sizeof(u8), &value); +} + +static inline int rocker_tlv_put_u16(struct rocker_desc_info *desc_info, + int attrtype, u16 value) +{ + return rocker_tlv_put(desc_info, attrtype, sizeof(u16), &value); +} + +static inline int rocker_tlv_put_be16(struct rocker_desc_info *desc_info, + int attrtype, __be16 value) +{ + return rocker_tlv_put(desc_info, attrtype, sizeof(__be16), &value); +} + +static inline int rocker_tlv_put_u32(struct rocker_desc_info *desc_info, + int attrtype, u32 value) +{ + return rocker_tlv_put(desc_info, attrtype, sizeof(u32), &value); +} + +static inline int rocker_tlv_put_be32(struct rocker_desc_info *desc_info, + int attrtype, __be32 value) +{ + return rocker_tlv_put(desc_info, attrtype, sizeof(__be32), &value); +} + +static inline int rocker_tlv_put_u64(struct rocker_desc_info *desc_info, + int attrtype, u64 value) +{ + return rocker_tlv_put(desc_info, attrtype, sizeof(u64), &value); +} + +static inline struct rocker_tlv * +rocker_tlv_nest_start(struct rocker_desc_info *desc_info, int attrtype) +{ + struct rocker_tlv *start = rocker_tlv_start(desc_info); + + if (rocker_tlv_put(desc_info, attrtype, 0, NULL) < 0) + return NULL; + + return start; +} + +static inline void rocker_tlv_nest_end(struct rocker_desc_info *desc_info, + struct rocker_tlv *start) +{ + start->len = (char *) rocker_tlv_start(desc_info) - (char *) start; +} + +static inline void rocker_tlv_nest_cancel(struct rocker_desc_info *desc_info, + const struct rocker_tlv *start) +{ + desc_info->tlv_size = (const char *) start - desc_info->data; +} + +#endif diff --git a/drivers/net/ethernet/samsung/sxgbe/Makefile b/drivers/net/ethernet/samsung/sxgbe/Makefile index dcc80b9d4370..31e968561d5c 100644 --- a/drivers/net/ethernet/samsung/sxgbe/Makefile +++ b/drivers/net/ethernet/samsung/sxgbe/Makefile @@ -1,4 +1,4 @@ obj-$(CONFIG_SXGBE_ETH) += samsung-sxgbe.o samsung-sxgbe-objs:= sxgbe_platform.o sxgbe_main.o sxgbe_desc.o \ sxgbe_dma.o sxgbe_core.o sxgbe_mtl.o sxgbe_mdio.o \ - sxgbe_ethtool.o sxgbe_xpcs.o $(samsung-sxgbe-y) + sxgbe_ethtool.o $(samsung-sxgbe-y) diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_mdio.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_mdio.c index 43ccb4a6de15..467ff7033606 100644 --- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_mdio.c +++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_mdio.c @@ -180,7 +180,7 @@ int sxgbe_mdio_register(struct net_device *ndev) } for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) { - struct phy_device *phy = mdio_bus->phy_map[phy_addr]; + struct phy_device *phy = mdiobus_get_phy(mdio_bus, phy_addr); if (phy) { char irq_num[4]; @@ -216,7 +216,7 @@ int sxgbe_mdio_register(struct net_device *ndev) } netdev_info(ndev, "PHY ID %08x at %d IRQ %s (%s)%s\n", phy->phy_id, phy_addr, irq_str, - dev_name(&phy->dev), act ? " active" : ""); + phydev_name(phy), act ? " active" : ""); phy_found = true; } } diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_xpcs.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_xpcs.c deleted file mode 100644 index 51c32194ba88..000000000000 --- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_xpcs.c +++ /dev/null @@ -1,91 +0,0 @@ -/* 10G controller driver for Samsung SoCs - * - * Copyright (C) 2013 Samsung Electronics Co., Ltd. 
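rocker_tlv_nest_start() emits a zero-length attribute and rocker_tlv_nest_end() back-patches its len to the distance between the nest header and the current tail, so the length covers every member including its padding. A tiny standalone illustration of that fixup, using the same mock layout as above:

#include <stdio.h>
#include <stdint.h>

#define ALIGN8(len) (((len) + 7u) & ~7u)

struct tlv { uint32_t type; uint16_t len; };
#define HDRLEN ((unsigned int)sizeof(struct tlv))	/* 8 with padding */

int main(void)
{
	char buf[64] = { 0 };
	unsigned int off = 0;

	/* rocker_tlv_nest_start(): emit the nest header, len unknown. */
	struct tlv *nest = (struct tlv *)buf;
	nest->type = 1;
	off += HDRLEN;

	/* One nested member attribute with a 4-byte payload. */
	struct tlv *member = (struct tlv *)(buf + off);
	member->type = 2;
	member->len = (uint16_t)(HDRLEN + 4);
	off += ALIGN8(HDRLEN + 4);		/* advance by the padded size */

	/* rocker_tlv_nest_end(): len is the distance from the nest header
	 * to the current tail, covering members plus their padding. */
	nest->len = (uint16_t)((buf + off) - (char *)nest);
	printf("nest len = %u\n", nest->len);	/* 8 + 16 = 24 */
	return 0;
}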
- * http://www.samsung.com - * - * Author: Siva Reddy Kallam <siva.kallam@samsung.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ -#include <linux/bitops.h> -#include <linux/kernel.h> -#include <linux/netdevice.h> -#include <linux/phy.h> -#include "sxgbe_common.h" -#include "sxgbe_xpcs.h" - -static int sxgbe_xpcs_read(struct net_device *ndev, unsigned int reg) -{ - u32 value; - struct sxgbe_priv_data *priv = netdev_priv(ndev); - - value = readl(priv->ioaddr + XPCS_OFFSET + reg); - - return value; -} - -static int sxgbe_xpcs_write(struct net_device *ndev, int reg, int data) -{ - struct sxgbe_priv_data *priv = netdev_priv(ndev); - - writel(data, priv->ioaddr + XPCS_OFFSET + reg); - - return 0; -} - -int sxgbe_xpcs_init(struct net_device *ndev) -{ - u32 value; - - value = sxgbe_xpcs_read(ndev, SR_PCS_MMD_CONTROL1); - /* 10G XAUI mode */ - sxgbe_xpcs_write(ndev, SR_PCS_CONTROL2, XPCS_TYPE_SEL_X); - sxgbe_xpcs_write(ndev, VR_PCS_MMD_XAUI_MODE_CONTROL, XPCS_XAUI_MODE); - sxgbe_xpcs_write(ndev, VR_PCS_MMD_XAUI_MODE_CONTROL, value | BIT(13)); - sxgbe_xpcs_write(ndev, SR_PCS_MMD_CONTROL1, value | BIT(11)); - - do { - value = sxgbe_xpcs_read(ndev, VR_PCS_MMD_DIGITAL_STATUS); - } while ((value & XPCS_QSEQ_STATE_MPLLOFF) == XPCS_QSEQ_STATE_STABLE); - - value = sxgbe_xpcs_read(ndev, SR_PCS_MMD_CONTROL1); - sxgbe_xpcs_write(ndev, SR_PCS_MMD_CONTROL1, value & ~BIT(11)); - - do { - value = sxgbe_xpcs_read(ndev, VR_PCS_MMD_DIGITAL_STATUS); - } while ((value & XPCS_QSEQ_STATE_MPLLOFF) != XPCS_QSEQ_STATE_STABLE); - - return 0; -} - -int sxgbe_xpcs_init_1G(struct net_device *ndev) -{ - int value; - - /* 10GBASE-X PCS (1G) mode */ - sxgbe_xpcs_write(ndev, SR_PCS_CONTROL2, XPCS_TYPE_SEL_X); - sxgbe_xpcs_write(ndev, VR_PCS_MMD_XAUI_MODE_CONTROL, XPCS_XAUI_MODE); - value = sxgbe_xpcs_read(ndev, SR_PCS_MMD_CONTROL1); - sxgbe_xpcs_write(ndev, SR_PCS_MMD_CONTROL1, value & ~BIT(13)); - - value = sxgbe_xpcs_read(ndev, SR_MII_MMD_CONTROL); - sxgbe_xpcs_write(ndev, SR_MII_MMD_CONTROL, value | BIT(6)); - sxgbe_xpcs_write(ndev, SR_MII_MMD_CONTROL, value & ~BIT(13)); - value = sxgbe_xpcs_read(ndev, SR_PCS_MMD_CONTROL1); - sxgbe_xpcs_write(ndev, SR_PCS_MMD_CONTROL1, value | BIT(11)); - - do { - value = sxgbe_xpcs_read(ndev, VR_PCS_MMD_DIGITAL_STATUS); - } while ((value & XPCS_QSEQ_STATE_MPLLOFF) != XPCS_QSEQ_STATE_STABLE); - - value = sxgbe_xpcs_read(ndev, SR_PCS_MMD_CONTROL1); - sxgbe_xpcs_write(ndev, SR_PCS_MMD_CONTROL1, value & ~BIT(11)); - - /* Auto Negotiation cluase 37 enable */ - value = sxgbe_xpcs_read(ndev, SR_MII_MMD_CONTROL); - sxgbe_xpcs_write(ndev, SR_MII_MMD_CONTROL, value | BIT(12)); - - return 0; -} diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_xpcs.h b/drivers/net/ethernet/samsung/sxgbe/sxgbe_xpcs.h deleted file mode 100644 index 6b26a50724d3..000000000000 --- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_xpcs.h +++ /dev/null @@ -1,38 +0,0 @@ -/* 10G controller driver for Samsung SoCs - * - * Copyright (C) 2013 Samsung Electronics Co., Ltd. - * http://www.samsung.com - * - * Author: Byungho An <bh74.an@samsung.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- */ -#ifndef __SXGBE_XPCS_H__ -#define __SXGBE_XPCS_H__ - -/* XPCS Registers */ -#define XPCS_OFFSET 0x1A060000 -#define SR_PCS_MMD_CONTROL1 0x030000 -#define SR_PCS_CONTROL2 0x030007 -#define VR_PCS_MMD_XAUI_MODE_CONTROL 0x038004 -#define VR_PCS_MMD_DIGITAL_STATUS 0x038010 -#define SR_MII_MMD_CONTROL 0x1F0000 -#define SR_MII_MMD_AN_ADV 0x1F0004 -#define SR_MII_MMD_AN_LINK_PARTNER_BA 0x1F0005 -#define VR_MII_MMD_AN_CONTROL 0x1F8001 -#define VR_MII_MMD_AN_INT_STATUS 0x1F8002 - -#define XPCS_QSEQ_STATE_STABLE 0x10 -#define XPCS_QSEQ_STATE_MPLLOFF 0x1c -#define XPCS_TYPE_SEL_R 0x00 -#define XPCS_TYPE_SEL_X 0x01 -#define XPCS_TYPE_SEL_W 0x02 -#define XPCS_XAUI_MODE 0x00 -#define XPCS_RXAUI_MODE 0x01 - -int sxgbe_xpcs_init(struct net_device *ndev); -int sxgbe_xpcs_init_1G(struct net_device *ndev); - -#endif /* __SXGBE_XPCS_H__ */ diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h index 10827476bc0b..5e3f93f04e62 100644 --- a/drivers/net/ethernet/sfc/efx.h +++ b/drivers/net/ethernet/sfc/efx.h @@ -32,7 +32,8 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev); netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb); void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index); -int efx_setup_tc(struct net_device *net_dev, u8 num_tc); +int efx_setup_tc(struct net_device *net_dev, u32 handle, __be16 proto, + struct tc_to_netdev *tc); unsigned int efx_tx_max_skb_descs(struct efx_nic *efx); extern unsigned int efx_piobuf_size; extern bool efx_separate_tx_channels; diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c index 034797661f96..445ccdb6bc67 100644 --- a/drivers/net/ethernet/sfc/ethtool.c +++ b/drivers/net/ethernet/sfc/ethtool.c @@ -783,14 +783,26 @@ static int efx_ethtool_reset(struct net_device *net_dev, u32 *flags) static const u8 mac_addr_ig_mask[ETH_ALEN] __aligned(2) = {0x01, 0, 0, 0, 0, 0}; #define IP4_ADDR_FULL_MASK ((__force __be32)~0) +#define IP_PROTO_FULL_MASK 0xFF #define PORT_FULL_MASK ((__force __be16)~0) #define ETHER_TYPE_FULL_MASK ((__force __be16)~0) +static inline void ip6_fill_mask(__be32 *mask) +{ + mask[0] = mask[1] = mask[2] = mask[3] = ~(__be32)0; +} + static int efx_ethtool_get_class_rule(struct efx_nic *efx, struct ethtool_rx_flow_spec *rule) { struct ethtool_tcpip4_spec *ip_entry = &rule->h_u.tcp_ip4_spec; struct ethtool_tcpip4_spec *ip_mask = &rule->m_u.tcp_ip4_spec; + struct ethtool_usrip4_spec *uip_entry = &rule->h_u.usr_ip4_spec; + struct ethtool_usrip4_spec *uip_mask = &rule->m_u.usr_ip4_spec; + struct ethtool_tcpip6_spec *ip6_entry = &rule->h_u.tcp_ip6_spec; + struct ethtool_tcpip6_spec *ip6_mask = &rule->m_u.tcp_ip6_spec; + struct ethtool_usrip6_spec *uip6_entry = &rule->h_u.usr_ip6_spec; + struct ethtool_usrip6_spec *uip6_mask = &rule->m_u.usr_ip6_spec; struct ethhdr *mac_entry = &rule->h_u.ether_spec; struct ethhdr *mac_mask = &rule->m_u.ether_spec; struct efx_filter_spec spec; @@ -833,6 +845,35 @@ static int efx_ethtool_get_class_rule(struct efx_nic *efx, ip_entry->psrc = spec.rem_port; ip_mask->psrc = PORT_FULL_MASK; } + } else if ((spec.match_flags & EFX_FILTER_MATCH_ETHER_TYPE) && + spec.ether_type == htons(ETH_P_IPV6) && + (spec.match_flags & EFX_FILTER_MATCH_IP_PROTO) && + (spec.ip_proto == IPPROTO_TCP || spec.ip_proto == IPPROTO_UDP) && + !(spec.match_flags & + ~(EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_OUTER_VID | + EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_REM_HOST | + EFX_FILTER_MATCH_IP_PROTO | + 
EFX_FILTER_MATCH_LOC_PORT | EFX_FILTER_MATCH_REM_PORT))) { + rule->flow_type = ((spec.ip_proto == IPPROTO_TCP) ? + TCP_V6_FLOW : UDP_V6_FLOW); + if (spec.match_flags & EFX_FILTER_MATCH_LOC_HOST) { + memcpy(ip6_entry->ip6dst, spec.loc_host, + sizeof(ip6_entry->ip6dst)); + ip6_fill_mask(ip6_mask->ip6dst); + } + if (spec.match_flags & EFX_FILTER_MATCH_REM_HOST) { + memcpy(ip6_entry->ip6src, spec.rem_host, + sizeof(ip6_entry->ip6src)); + ip6_fill_mask(ip6_mask->ip6src); + } + if (spec.match_flags & EFX_FILTER_MATCH_LOC_PORT) { + ip6_entry->pdst = spec.loc_port; + ip6_mask->pdst = PORT_FULL_MASK; + } + if (spec.match_flags & EFX_FILTER_MATCH_REM_PORT) { + ip6_entry->psrc = spec.rem_port; + ip6_mask->psrc = PORT_FULL_MASK; + } } else if (!(spec.match_flags & ~(EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG | EFX_FILTER_MATCH_REM_MAC | EFX_FILTER_MATCH_ETHER_TYPE | @@ -855,6 +896,47 @@ static int efx_ethtool_get_class_rule(struct efx_nic *efx, mac_entry->h_proto = spec.ether_type; mac_mask->h_proto = ETHER_TYPE_FULL_MASK; } + } else if (spec.match_flags & EFX_FILTER_MATCH_ETHER_TYPE && + spec.ether_type == htons(ETH_P_IP) && + !(spec.match_flags & + ~(EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_OUTER_VID | + EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_REM_HOST | + EFX_FILTER_MATCH_IP_PROTO))) { + rule->flow_type = IPV4_USER_FLOW; + uip_entry->ip_ver = ETH_RX_NFC_IP4; + if (spec.match_flags & EFX_FILTER_MATCH_IP_PROTO) { + uip_mask->proto = IP_PROTO_FULL_MASK; + uip_entry->proto = spec.ip_proto; + } + if (spec.match_flags & EFX_FILTER_MATCH_LOC_HOST) { + uip_entry->ip4dst = spec.loc_host[0]; + uip_mask->ip4dst = IP4_ADDR_FULL_MASK; + } + if (spec.match_flags & EFX_FILTER_MATCH_REM_HOST) { + uip_entry->ip4src = spec.rem_host[0]; + uip_mask->ip4src = IP4_ADDR_FULL_MASK; + } + } else if (spec.match_flags & EFX_FILTER_MATCH_ETHER_TYPE && + spec.ether_type == htons(ETH_P_IPV6) && + !(spec.match_flags & + ~(EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_OUTER_VID | + EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_REM_HOST | + EFX_FILTER_MATCH_IP_PROTO))) { + rule->flow_type = IPV6_USER_FLOW; + if (spec.match_flags & EFX_FILTER_MATCH_IP_PROTO) { + uip6_mask->l4_proto = IP_PROTO_FULL_MASK; + uip6_entry->l4_proto = spec.ip_proto; + } + if (spec.match_flags & EFX_FILTER_MATCH_LOC_HOST) { + memcpy(uip6_entry->ip6dst, spec.loc_host, + sizeof(uip6_entry->ip6dst)); + ip6_fill_mask(uip6_mask->ip6dst); + } + if (spec.match_flags & EFX_FILTER_MATCH_REM_HOST) { + memcpy(uip6_entry->ip6src, spec.rem_host, + sizeof(uip6_entry->ip6src)); + ip6_fill_mask(uip6_mask->ip6src); + } } else { /* The above should handle all filters that we insert */ WARN_ON(1); @@ -946,11 +1028,27 @@ efx_ethtool_get_rxnfc(struct net_device *net_dev, } } +static inline bool ip6_mask_is_full(__be32 mask[4]) +{ + return !~(mask[0] & mask[1] & mask[2] & mask[3]); +} + +static inline bool ip6_mask_is_empty(__be32 mask[4]) +{ + return !(mask[0] | mask[1] | mask[2] | mask[3]); +} + static int efx_ethtool_set_class_rule(struct efx_nic *efx, struct ethtool_rx_flow_spec *rule) { struct ethtool_tcpip4_spec *ip_entry = &rule->h_u.tcp_ip4_spec; struct ethtool_tcpip4_spec *ip_mask = &rule->m_u.tcp_ip4_spec; + struct ethtool_usrip4_spec *uip_entry = &rule->h_u.usr_ip4_spec; + struct ethtool_usrip4_spec *uip_mask = &rule->m_u.usr_ip4_spec; + struct ethtool_tcpip6_spec *ip6_entry = &rule->h_u.tcp_ip6_spec; + struct ethtool_tcpip6_spec *ip6_mask = &rule->m_u.tcp_ip6_spec; + struct ethtool_usrip6_spec *uip6_entry = &rule->h_u.usr_ip6_spec; + struct 
ethtool_usrip6_spec *uip6_mask = &rule->m_u.usr_ip6_spec; struct ethhdr *mac_entry = &rule->h_u.ether_spec; struct ethhdr *mac_mask = &rule->m_u.ether_spec; struct efx_filter_spec spec; @@ -1012,6 +1110,92 @@ static int efx_ethtool_set_class_rule(struct efx_nic *efx, return -EINVAL; break; + case TCP_V6_FLOW: + case UDP_V6_FLOW: + spec.match_flags = (EFX_FILTER_MATCH_ETHER_TYPE | + EFX_FILTER_MATCH_IP_PROTO); + spec.ether_type = htons(ETH_P_IPV6); + spec.ip_proto = ((rule->flow_type & ~FLOW_EXT) == TCP_V6_FLOW ? + IPPROTO_TCP : IPPROTO_UDP); + if (!ip6_mask_is_empty(ip6_mask->ip6dst)) { + if (!ip6_mask_is_full(ip6_mask->ip6dst)) + return -EINVAL; + spec.match_flags |= EFX_FILTER_MATCH_LOC_HOST; + memcpy(spec.loc_host, ip6_entry->ip6dst, sizeof(spec.loc_host)); + } + if (!ip6_mask_is_empty(ip6_mask->ip6src)) { + if (!ip6_mask_is_full(ip6_mask->ip6src)) + return -EINVAL; + spec.match_flags |= EFX_FILTER_MATCH_REM_HOST; + memcpy(spec.rem_host, ip6_entry->ip6src, sizeof(spec.rem_host)); + } + if (ip6_mask->pdst) { + if (ip6_mask->pdst != PORT_FULL_MASK) + return -EINVAL; + spec.match_flags |= EFX_FILTER_MATCH_LOC_PORT; + spec.loc_port = ip6_entry->pdst; + } + if (ip6_mask->psrc) { + if (ip6_mask->psrc != PORT_FULL_MASK) + return -EINVAL; + spec.match_flags |= EFX_FILTER_MATCH_REM_PORT; + spec.rem_port = ip6_entry->psrc; + } + if (ip6_mask->tclass) + return -EINVAL; + break; + + case IPV4_USER_FLOW: + if (uip_mask->l4_4_bytes || uip_mask->tos || uip_mask->ip_ver || + uip_entry->ip_ver != ETH_RX_NFC_IP4) + return -EINVAL; + spec.match_flags = EFX_FILTER_MATCH_ETHER_TYPE; + spec.ether_type = htons(ETH_P_IP); + if (uip_mask->ip4dst) { + if (uip_mask->ip4dst != IP4_ADDR_FULL_MASK) + return -EINVAL; + spec.match_flags |= EFX_FILTER_MATCH_LOC_HOST; + spec.loc_host[0] = uip_entry->ip4dst; + } + if (uip_mask->ip4src) { + if (uip_mask->ip4src != IP4_ADDR_FULL_MASK) + return -EINVAL; + spec.match_flags |= EFX_FILTER_MATCH_REM_HOST; + spec.rem_host[0] = uip_entry->ip4src; + } + if (uip_mask->proto) { + if (uip_mask->proto != IP_PROTO_FULL_MASK) + return -EINVAL; + spec.match_flags |= EFX_FILTER_MATCH_IP_PROTO; + spec.ip_proto = uip_entry->proto; + } + break; + + case IPV6_USER_FLOW: + if (uip6_mask->l4_4_bytes || uip6_mask->tclass) + return -EINVAL; + spec.match_flags = EFX_FILTER_MATCH_ETHER_TYPE; + spec.ether_type = htons(ETH_P_IPV6); + if (!ip6_mask_is_empty(uip6_mask->ip6dst)) { + if (!ip6_mask_is_full(uip6_mask->ip6dst)) + return -EINVAL; + spec.match_flags |= EFX_FILTER_MATCH_LOC_HOST; + memcpy(spec.loc_host, uip6_entry->ip6dst, sizeof(spec.loc_host)); + } + if (!ip6_mask_is_empty(uip6_mask->ip6src)) { + if (!ip6_mask_is_full(uip6_mask->ip6src)) + return -EINVAL; + spec.match_flags |= EFX_FILTER_MATCH_REM_HOST; + memcpy(spec.rem_host, uip6_entry->ip6src, sizeof(spec.rem_host)); + } + if (uip6_mask->l4_proto) { + if (uip6_mask->l4_proto != IP_PROTO_FULL_MASK) + return -EINVAL; + spec.match_flags |= EFX_FILTER_MATCH_IP_PROTO; + spec.ip_proto = uip6_entry->l4_proto; + } + break; + case ETHER_FLOW: if (!is_zero_ether_addr(mac_mask->h_dest)) { if (ether_addr_equal(mac_mask->h_dest, diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c index f7a0ec1bca97..2cdb5718ed66 100644 --- a/drivers/net/ethernet/sfc/tx.c +++ b/drivers/net/ethernet/sfc/tx.c @@ -562,14 +562,20 @@ void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue) efx->n_tx_channels : 0)); } -int efx_setup_tc(struct net_device *net_dev, u8 num_tc) +int efx_setup_tc(struct net_device *net_dev, u32 handle, __be16 proto, + 
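The new ip6_mask_is_full()/ip6_mask_is_empty() helpers mean sfc accepts an IPv6 address mask only when it is all-ones or all-zeroes; anything in between, a /64 prefix for instance, is rejected with -EINVAL. The logic, checked standalone:

#include <stdio.h>
#include <stdint.h>

/* Same logic as the patch's helpers, minus the __be32 annotations
 * (byte order does not matter for all-ones / all-zeroes tests). */
static int mask_is_full(const uint32_t m[4])
{
	return !~(m[0] & m[1] & m[2] & m[3]);
}

static int mask_is_empty(const uint32_t m[4])
{
	return !(m[0] | m[1] | m[2] | m[3]);
}

int main(void)
{
	uint32_t full[4] = { ~0u, ~0u, ~0u, ~0u };
	uint32_t empty[4] = { 0, 0, 0, 0 };
	uint32_t prefix[4] = { ~0u, ~0u, 0, 0 };	/* a /64: rejected */

	printf("full: %d, empty: %d\n",
	       mask_is_full(full), mask_is_empty(empty));
	printf("prefix: full %d, empty %d -> driver says -EINVAL\n",
	       mask_is_full(prefix), mask_is_empty(prefix));
	return 0;
}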
struct tc_to_netdev *ntc) { struct efx_nic *efx = netdev_priv(net_dev); struct efx_channel *channel; struct efx_tx_queue *tx_queue; - unsigned tc; + unsigned tc, num_tc; int rc; + if (handle != TC_H_ROOT || ntc->type != TC_SETUP_MQPRIO) + return -EINVAL; + + num_tc = ntc->tc; + if (efx_nic_rev(efx) < EFX_REV_FALCON_B0 || num_tc > EFX_MAX_TX_TC) return -EINVAL; diff --git a/drivers/net/ethernet/smsc/Kconfig b/drivers/net/ethernet/smsc/Kconfig index eb9230e2092f..63aca9f847e1 100644 --- a/drivers/net/ethernet/smsc/Kconfig +++ b/drivers/net/ethernet/smsc/Kconfig @@ -7,7 +7,7 @@ config NET_VENDOR_SMSC default y depends on ARM || ARM64 || ATARI_ETHERNAT || BLACKFIN || COLDFIRE || \ ISA || M32R || MAC || MIPS || MN10300 || NIOS2 || PCI || \ - PCMCIA || SUPERH || XTENSA + PCMCIA || SUPERH || XTENSA || H8300 ---help--- If you have a network (Ethernet) card belonging to this class, say Y. @@ -38,7 +38,7 @@ config SMC91X select MII depends on !OF || GPIOLIB depends on ARM || ARM64 || ATARI_ETHERNAT || BLACKFIN || COLDFIRE || \ - M32R || MIPS || MN10300 || NIOS2 || SUPERH || XTENSA + M32R || MIPS || MN10300 || NIOS2 || SUPERH || XTENSA || H8300 ---help--- This is a driver for SMC's 91x series of Ethernet chipsets, including the SMC91C94 and the SMC91C111. Say Y if you want it diff --git a/drivers/net/ethernet/smsc/smc91x.h b/drivers/net/ethernet/smsc/smc91x.h index a3c129e1e40a..1a55c7976df0 100644 --- a/drivers/net/ethernet/smsc/smc91x.h +++ b/drivers/net/ethernet/smsc/smc91x.h @@ -172,6 +172,17 @@ static inline void mcf_outsw(void *a, unsigned char *p, int l) #define SMC_IRQ_FLAGS 0 +#elif defined(CONFIG_H8300) +#define SMC_CAN_USE_8BIT 1 +#define SMC_CAN_USE_16BIT 0 +#define SMC_CAN_USE_32BIT 0 +#define SMC_NOWAIT 0 + +#define SMC_inb(a, r) ioread8((a) + (r)) +#define SMC_outb(v, a, r) iowrite8(v, (a) + (r)) +#define SMC_insb(a, r, p, l) ioread8_rep((a) + (r), p, l) +#define SMC_outsb(a, r, p, l) iowrite8_rep((a) + (r), p, l) + #else /* diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c index 219a99b7a631..8af25563f627 100644 --- a/drivers/net/ethernet/smsc/smsc911x.c +++ b/drivers/net/ethernet/smsc/smsc911x.c @@ -864,8 +864,8 @@ static int smsc911x_phy_loopbacktest(struct net_device *dev) for (i = 0; i < 10; i++) { /* Set PHY to 10/FD, no ANEG, and loopback mode */ - smsc911x_mii_write(phy_dev->bus, phy_dev->addr, MII_BMCR, - BMCR_LOOPBACK | BMCR_FULLDPLX); + smsc911x_mii_write(phy_dev->mdio.bus, phy_dev->mdio.addr, + MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX); /* Enable MAC tx/rx, FD */ spin_lock_irqsave(&pdata->mac_lock, flags); @@ -893,7 +893,7 @@ static int smsc911x_phy_loopbacktest(struct net_device *dev) spin_unlock_irqrestore(&pdata->mac_lock, flags); /* Cancel PHY loopback mode */ - smsc911x_mii_write(phy_dev->bus, phy_dev->addr, MII_BMCR, 0); + smsc911x_mii_write(phy_dev->mdio.bus, phy_dev->mdio.addr, MII_BMCR, 0); smsc911x_reg_write(pdata, TX_CFG, 0); smsc911x_reg_write(pdata, RX_CFG, 0); @@ -1021,7 +1021,7 @@ static int smsc911x_mii_probe(struct net_device *dev) } SMSC_TRACE(pdata, probe, "PHY: addr %d, phy_id 0x%08X", - phydev->addr, phydev->phy_id); + phydev->mdio.addr, phydev->phy_id); ret = phy_connect_direct(dev, phydev, &smsc911x_phy_adjust_link, pdata->config.phy_interface); @@ -1031,9 +1031,7 @@ static int smsc911x_mii_probe(struct net_device *dev) return ret; } - netdev_info(dev, - "attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n", - phydev->drv->name, dev_name(&phydev->dev), phydev->irq); + phy_attached_info(phydev); /* mask with 
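efx_setup_tc() shows the contract of the reworked ndo_setup_tc: validate the qdisc handle and the offload type before touching the type-specific payload. A standalone sketch of that dispatch shape; the struct below is a stand-in, not the kernel's tc_to_netdev:

#include <stdio.h>

#define TC_H_ROOT	0xFFFFFFFFu
#define TC_SETUP_MQPRIO	0
#define EINVAL		22

/* Stand-in for struct tc_to_netdev: a type tag plus a payload union. */
struct tc_req {
	unsigned int type;
	union {
		unsigned char tc;	/* traffic class count for mqprio */
	};
};

static int setup_tc(unsigned int handle, const struct tc_req *ntc)
{
	/* Reject anything that is not a root mqprio request before
	 * looking at the union, the same order efx_setup_tc() uses. */
	if (handle != TC_H_ROOT || ntc->type != TC_SETUP_MQPRIO)
		return -EINVAL;

	printf("configuring %u traffic classes\n", ntc->tc);
	return 0;
}

int main(void)
{
	struct tc_req req = { .type = TC_SETUP_MQPRIO, .tc = 4 };

	setup_tc(TC_H_ROOT, &req);			/* accepted */
	return setup_tc(0x10000u, &req) == -EINVAL ? 0 : 1;
}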
MAC supported features */ phydev->supported &= (PHY_BASIC_FEATURES | SUPPORTED_Pause | @@ -1061,7 +1059,7 @@ static int smsc911x_mii_init(struct platform_device *pdev, struct net_device *dev) { struct smsc911x_data *pdata = netdev_priv(dev); - int err = -ENXIO, i; + int err = -ENXIO; pdata->mii_bus = mdiobus_alloc(); if (!pdata->mii_bus) { @@ -1075,9 +1073,7 @@ static int smsc911x_mii_init(struct platform_device *pdev, pdata->mii_bus->priv = pdata; pdata->mii_bus->read = smsc911x_mii_read; pdata->mii_bus->write = smsc911x_mii_write; - pdata->mii_bus->irq = pdata->phy_irq; - for (i = 0; i < PHY_MAX_ADDR; ++i) - pdata->mii_bus->irq[i] = PHY_POLL; + memcpy(pdata->mii_bus->irq, pdata->phy_irq, sizeof(pdata->phy_irq)); pdata->mii_bus->parent = &pdev->dev; @@ -1992,7 +1988,8 @@ smsc911x_ethtool_getregs(struct net_device *dev, struct ethtool_regs *regs, } for (i = 0; i <= 31; i++) - data[j++] = smsc911x_mii_read(phy_dev->bus, phy_dev->addr, i); + data[j++] = smsc911x_mii_read(phy_dev->mdio.bus, + phy_dev->mdio.addr, i); } static void smsc911x_eeprom_enable_access(struct smsc911x_data *pdata) diff --git a/drivers/net/ethernet/smsc/smsc9420.c b/drivers/net/ethernet/smsc/smsc9420.c index 4a90cdae5444..8594b9e8b28b 100644 --- a/drivers/net/ethernet/smsc/smsc9420.c +++ b/drivers/net/ethernet/smsc/smsc9420.c @@ -78,7 +78,6 @@ struct smsc9420_pdata { struct phy_device *phy_dev; struct mii_bus *mii_bus; - int phy_irq[PHY_MAX_ADDR]; int last_duplex; int last_carrier; }; @@ -316,7 +315,8 @@ smsc9420_ethtool_getregs(struct net_device *dev, struct ethtool_regs *regs, return; for (i = 0; i <= 31; i++) - data[j++] = smsc9420_mii_read(phy_dev->bus, phy_dev->addr, i); + data[j++] = smsc9420_mii_read(phy_dev->mdio.bus, + phy_dev->mdio.addr, i); } static void smsc9420_eeprom_enable_access(struct smsc9420_pdata *pd) @@ -1158,16 +1158,13 @@ static int smsc9420_mii_probe(struct net_device *dev) BUG_ON(pd->phy_dev); /* Device only supports internal PHY at address 1 */ - if (!pd->mii_bus->phy_map[1]) { + phydev = mdiobus_get_phy(pd->mii_bus, 1); + if (!phydev) { netdev_err(dev, "no PHY found at address 1\n"); return -ENODEV; } - phydev = pd->mii_bus->phy_map[1]; - netif_info(pd, probe, pd->dev, "PHY addr %d, phy_id 0x%08X\n", - phydev->addr, phydev->phy_id); - - phydev = phy_connect(dev, dev_name(&phydev->dev), + phydev = phy_connect(dev, phydev_name(phydev), smsc9420_phy_adjust_link, PHY_INTERFACE_MODE_MII); if (IS_ERR(phydev)) { @@ -1175,14 +1172,13 @@ static int smsc9420_mii_probe(struct net_device *dev) return PTR_ERR(phydev); } - netdev_info(dev, "attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n", - phydev->drv->name, dev_name(&phydev->dev), phydev->irq); - /* mask with MAC supported features */ phydev->supported &= (PHY_BASIC_FEATURES | SUPPORTED_Pause | SUPPORTED_Asym_Pause); phydev->advertising = phydev->supported; + phy_attached_info(phydev); + pd->phy_dev = phydev; pd->last_duplex = -1; pd->last_carrier = -1; @@ -1193,7 +1189,7 @@ static int smsc9420_mii_init(struct net_device *dev) { struct smsc9420_pdata *pd = netdev_priv(dev); - int err = -ENXIO, i; + int err = -ENXIO; pd->mii_bus = mdiobus_alloc(); if (!pd->mii_bus) { @@ -1206,9 +1202,6 @@ static int smsc9420_mii_init(struct net_device *dev) pd->mii_bus->priv = pd; pd->mii_bus->read = smsc9420_mii_read; pd->mii_bus->write = smsc9420_mii_write; - pd->mii_bus->irq = pd->phy_irq; - for (i = 0; i < PHY_MAX_ADDR; ++i) - pd->mii_bus->irq[i] = PHY_POLL; /* Mask all PHYs except ID 1 (internal) */ 
pd->mii_bus->phy_mask = ~(1 << 1); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c index 16c85ccd1762..0faf16336035 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c @@ -196,7 +196,6 @@ int stmmac_mdio_register(struct net_device *ndev) { int err = 0; struct mii_bus *new_bus; - int *irqlist; struct stmmac_priv *priv = netdev_priv(ndev); struct stmmac_mdio_bus_data *mdio_bus_data = priv->plat->mdio_bus_data; int addr, found; @@ -218,8 +217,7 @@ int stmmac_mdio_register(struct net_device *ndev) if (mdio_node) { netdev_dbg(ndev, "FOUND MDIO subnode\n"); } else { - netdev_err(ndev, "NO MDIO subnode\n"); - return 0; + netdev_warn(ndev, "No MDIO subnode found\n"); } } @@ -227,13 +225,8 @@ int stmmac_mdio_register(struct net_device *ndev) if (new_bus == NULL) return -ENOMEM; - if (mdio_bus_data->irqs) { - irqlist = mdio_bus_data->irqs; - } else { - for (addr = 0; addr < PHY_MAX_ADDR; addr++) - priv->mii_irq[addr] = PHY_POLL; - irqlist = priv->mii_irq; - } + if (mdio_bus_data->irqs) + memcpy(new_bus->irq, mdio_bus_data->irqs, sizeof(new_bus->irq)); #ifdef CONFIG_OF if (priv->device->of_node) @@ -247,11 +240,13 @@ int stmmac_mdio_register(struct net_device *ndev) snprintf(new_bus->id, MII_BUS_ID_SIZE, "%s-%x", new_bus->name, priv->plat->bus_id); new_bus->priv = ndev; - new_bus->irq = irqlist; new_bus->phy_mask = mdio_bus_data->phy_mask; new_bus->parent = priv->device; - err = of_mdiobus_register(new_bus, mdio_node); + if (mdio_node) + err = of_mdiobus_register(new_bus, mdio_node); + else + err = mdiobus_register(new_bus); if (err != 0) { pr_err("%s: Cannot register as MDIO bus\n", new_bus->name); goto bus_register_fail; @@ -259,7 +254,7 @@ int stmmac_mdio_register(struct net_device *ndev) found = 0; for (addr = 0; addr < PHY_MAX_ADDR; addr++) { - struct phy_device *phydev = new_bus->phy_map[addr]; + struct phy_device *phydev = mdiobus_get_phy(new_bus, addr); if (phydev) { int act = 0; char irq_num[4]; @@ -271,7 +266,8 @@ int stmmac_mdio_register(struct net_device *ndev) */ if ((mdio_bus_data->irqs == NULL) && (mdio_bus_data->probed_phy_irq > 0)) { - irqlist[addr] = mdio_bus_data->probed_phy_irq; + new_bus->irq[addr] = + mdio_bus_data->probed_phy_irq; phydev->irq = mdio_bus_data->probed_phy_irq; } @@ -298,7 +294,7 @@ int stmmac_mdio_register(struct net_device *ndev) } pr_info("%s: PHY ID %08x at %d IRQ %s (%s)%s\n", ndev->name, phydev->phy_id, addr, - irq_str, dev_name(&phydev->dev), + irq_str, phydev_name(phydev), act ? 
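The smsc and stmmac conversions above replace direct phy_map[] indexing with the mdiobus_get_phy() accessor, so the PHY table becomes private to the MDIO core. A typical bus scan then takes this shape (mock bus and illustrative types, only the accessor name is the kernel's):

#include <stdio.h>

#define PHY_MAX_ADDR 32

struct phy_device {
	int addr;
	unsigned int phy_id;
};

/* Mock accessor in the role of mdiobus_get_phy(): MDIO buses are
 * sparse, so most addresses come back NULL. */
static struct phy_device *mock_get_phy(int addr)
{
	static struct phy_device phy1 = { 1, 0x01410cc2 };

	return addr == 1 ? &phy1 : NULL;
}

int main(void)
{
	int addr;

	/* Accessor-based scan, same shape as the converted drivers. */
	for (addr = 0; addr < PHY_MAX_ADDR; addr++) {
		struct phy_device *phy = mock_get_phy(addr);

		if (phy)
			printf("phy[%d]: id 0x%08x\n", phy->addr, phy->phy_id);
	}
	return 0;
}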
" active" : ""); found = 1; } diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c index cc106d892e29..942a95db2061 100644 --- a/drivers/net/ethernet/sun/sunvnet.c +++ b/drivers/net/ethernet/sun/sunvnet.c @@ -17,6 +17,8 @@ #include <linux/mutex.h> #include <linux/highmem.h> #include <linux/if_vlan.h> +#define CREATE_TRACE_POINTS +#include <trace/events/sunvnet.h> #if IS_ENABLED(CONFIG_IPV6) #include <linux/icmpv6.h> @@ -389,17 +391,27 @@ static int vnet_rx_one(struct vnet_port *port, struct vio_net_desc *desc) if (vio_version_after_eq(&port->vio, 1, 8)) { struct vio_net_dext *dext = vio_net_ext(desc); + skb_reset_network_header(skb); + if (dext->flags & VNET_PKT_HCK_IPV4_HDRCKSUM) { if (skb->protocol == ETH_P_IP) { - struct iphdr *iph = (struct iphdr *)skb->data; + struct iphdr *iph = ip_hdr(skb); iph->check = 0; ip_send_check(iph); } } if ((dext->flags & VNET_PKT_HCK_FULLCKSUM) && - skb->ip_summed == CHECKSUM_NONE) - vnet_fullcsum(skb); + skb->ip_summed == CHECKSUM_NONE) { + if (skb->protocol == htons(ETH_P_IP)) { + struct iphdr *iph = ip_hdr(skb); + int ihl = iph->ihl * 4; + + skb_reset_transport_header(skb); + skb_set_transport_header(skb, ihl); + vnet_fullcsum(skb); + } + } if (dext->flags & VNET_PKT_HCK_IPV4_HDRCKSUM_OK) { skb->ip_summed = CHECKSUM_PARTIAL; skb->csum_level = 0; @@ -530,6 +542,8 @@ static int vnet_walk_rx_one(struct vnet_port *port, err = vnet_rx_one(port, desc); if (err == -ECONNRESET) return err; + trace_vnet_rx_one(port->vio._local_sid, port->vio._peer_sid, + index, desc->hdr.ack); desc->hdr.state = VIO_DESC_DONE; err = put_rx_desc(port, dr, desc, index); if (err < 0) @@ -577,9 +591,15 @@ static int vnet_walk_rx(struct vnet_port *port, struct vio_dring_state *dr, ack_start = ack_end = vio_dring_prev(dr, start); if (send_ack) { port->napi_resume = false; + trace_vnet_tx_send_stopped_ack(port->vio._local_sid, + port->vio._peer_sid, + ack_end, *npkts); return vnet_send_ack(port, dr, ack_start, ack_end, VIO_DRING_STOPPED); } else { + trace_vnet_tx_defer_stopped_ack(port->vio._local_sid, + port->vio._peer_sid, + ack_end, *npkts); port->napi_resume = true; port->napi_stop_idx = ack_end; return 1; @@ -653,6 +673,8 @@ static int vnet_ack(struct vnet_port *port, void *msgbuf) /* sync for race conditions with vnet_start_xmit() and tell xmit it * is time to send a trigger. 
*/ + trace_vnet_rx_stopped_ack(port->vio._local_sid, + port->vio._peer_sid, end); dr->cons = vio_dring_next(dr, end); desc = vio_dring_entry(dr, dr->cons); if (desc->hdr.state == VIO_DESC_READY && !port->start_cons) { @@ -876,6 +898,9 @@ static int __vnet_tx_trigger(struct vnet_port *port, u32 start) int retries = 0; if (port->stop_rx) { + trace_vnet_tx_pending_stopped_ack(port->vio._local_sid, + port->vio._peer_sid, + port->stop_rx_idx, -1); err = vnet_send_ack(port, &port->vio.drings[VIO_DRIVER_RX_RING], port->stop_rx_idx, -1, @@ -898,6 +923,8 @@ static int __vnet_tx_trigger(struct vnet_port *port, u32 start) if (retries++ > VNET_MAX_RETRIES) break; } while (err == -EAGAIN); + trace_vnet_tx_trigger(port->vio._local_sid, + port->vio._peer_sid, start, err); return err; } @@ -1404,8 +1431,11 @@ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev) * producer to consumer announcement that work is available to the * consumer */ - if (!port->start_cons) - goto ldc_start_done; /* previous trigger suffices */ + if (!port->start_cons) { /* previous trigger suffices */ + trace_vnet_skip_tx_trigger(port->vio._local_sid, + port->vio._peer_sid, dr->cons); + goto ldc_start_done; + } err = __vnet_tx_trigger(port, dr->cons); if (unlikely(err < 0)) { diff --git a/drivers/net/ethernet/synopsys/dwc_eth_qos.c b/drivers/net/ethernet/synopsys/dwc_eth_qos.c index 9066d7a8483c..70814b7386b3 100644 --- a/drivers/net/ethernet/synopsys/dwc_eth_qos.c +++ b/drivers/net/ethernet/synopsys/dwc_eth_qos.c @@ -972,9 +972,7 @@ static int dwceqos_mii_probe(struct net_device *ndev) } if (netif_msg_probe(lp)) - netdev_dbg(lp->ndev, - "phydev %p, phydev->phy_id 0xa%x, phydev->addr 0x%x\n", - phydev, phydev->phy_id, phydev->addr); + phy_attached_info(phydev); phydev->supported &= PHY_GBIT_FEATURES; @@ -983,14 +981,6 @@ static int dwceqos_mii_probe(struct net_device *ndev) lp->duplex = DUPLEX_UNKNOWN; lp->phy_dev = phydev; - if (netif_msg_probe(lp)) { - netdev_dbg(lp->ndev, "phy_addr 0x%x, phy_id 0x%08x\n", - lp->phy_dev->addr, lp->phy_dev->phy_id); - - netdev_dbg(lp->ndev, "attach [%s] phy driver\n", - lp->phy_dev->drv->name); - } - return 0; } @@ -1230,7 +1220,7 @@ static void dwceqos_enable_mmc_interrupt(struct net_local *lp) static int dwceqos_mii_init(struct net_local *lp) { - int ret = -ENXIO, i; + int ret = -ENXIO; struct resource res; struct device_node *mdionode; @@ -1251,24 +1241,14 @@ static int dwceqos_mii_init(struct net_local *lp) lp->mii_bus->priv = lp; lp->mii_bus->parent = &lp->ndev->dev; - lp->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); - if (!lp->mii_bus->irq) { - ret = -ENOMEM; - goto err_out_free_mdiobus; - } - - for (i = 0; i < PHY_MAX_ADDR; i++) - lp->mii_bus->irq[i] = PHY_POLL; of_address_to_resource(lp->pdev->dev.of_node, 0, &res); snprintf(lp->mii_bus->id, MII_BUS_ID_SIZE, "%.8llx", (unsigned long long)res.start); if (of_mdiobus_register(lp->mii_bus, mdionode)) - goto err_out_free_mdio_irq; + goto err_out_free_mdiobus; return 0; -err_out_free_mdio_irq: - kfree(lp->mii_bus->irq); err_out_free_mdiobus: mdiobus_free(lp->mii_bus); err_out: @@ -2107,7 +2087,7 @@ static int dwceqos_tx_frags(struct sk_buff *skb, struct net_local *lp, dd = &lp->tx_descs[lp->tx_next]; /* Set DMA Descriptor fields */ - dd->des0 = dma_handle; + dd->des0 = dma_handle + consumed_size; dd->des1 = 0; dd->des2 = dma_size; @@ -2987,7 +2967,6 @@ static int dwceqos_remove(struct platform_device *pdev) if (lp->phy_dev) phy_disconnect(lp->phy_dev); mdiobus_unregister(lp->mii_bus); - 
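The vnet_rx_one() change above places the transport header ihl * 4 bytes past the network header before recomputing checksums, where ihl comes from the low nibble of the first IPv4 header byte. A standalone illustration of that offset plus an RFC 791 style header checksum; this is a userspace reimplementation modeled loosely on what ip_send_check() computes, not the kernel code:

#include <stdio.h>
#include <stdint.h>

/* 16-bit one's-complement sum over the ihl * 4 header bytes, with the
 * checksum field zeroed beforehand. */
static uint16_t ipv4_csum(const uint8_t *hdr, unsigned int ihl)
{
	uint32_t sum = 0;
	unsigned int i;

	for (i = 0; i < ihl * 4; i += 2)
		sum += (uint32_t)hdr[i] << 8 | hdr[i + 1];
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

int main(void)
{
	/* 20-byte IPv4 header, ihl = 5, checksum bytes zeroed first. */
	uint8_t hdr[20] = {
		0x45, 0x00, 0x00, 0x54, 0x00, 0x00, 0x40, 0x00,
		0x40, 0x01, 0x00, 0x00, 0xc0, 0xa8, 0x00, 0x01,
		0xc0, 0xa8, 0x00, 0xc7,
	};
	unsigned int ihl = hdr[0] & 0x0f;

	printf("transport header offset = %u bytes\n", ihl * 4);	/* 20 */
	printf("header checksum = 0x%04x\n", ipv4_csum(hdr, ihl));
	return 0;
}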
kfree(lp->mii_bus->irq); mdiobus_free(lp->mii_bus); unregister_netdev(ndev); diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c index 77d26fe286c0..7eef45e6d70a 100644 --- a/drivers/net/ethernet/ti/cpmac.c +++ b/drivers/net/ethernet/ti/cpmac.c @@ -316,8 +316,6 @@ static int cpmac_mdio_reset(struct mii_bus *bus) return 0; } -static int mii_irqs[PHY_MAX_ADDR] = { PHY_POLL, }; - static struct mii_bus *cpmac_mii; static void cpmac_set_multicast_list(struct net_device *dev) @@ -1118,7 +1116,7 @@ static int cpmac_probe(struct platform_device *pdev) for (phy_id = 0; phy_id < PHY_MAX_ADDR; phy_id++) { if (!(pdata->phy_mask & (1 << phy_id))) continue; - if (!cpmac_mii->phy_map[phy_id]) + if (!mdiobus_get_phy(cpmac_mii, phy_id)) continue; strncpy(mdio_bus_id, cpmac_mii->id, MII_BUS_ID_SIZE); break; @@ -1226,7 +1224,6 @@ int cpmac_init(void) cpmac_mii->read = cpmac_mdio_read; cpmac_mii->write = cpmac_mdio_write; cpmac_mii->reset = cpmac_mdio_reset; - cpmac_mii->irq = mii_irqs; cpmac_mii->priv = ioremap(AR7_REGS_MDIO, 256); diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 34ce7dce8c9d..42fdfd4d9d4f 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -1159,8 +1159,8 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv) slave->data->phy_id, slave->slave_num); slave->phy = NULL; } else { - dev_info(priv->dev, "phy found : id is : 0x%x\n", - slave->phy->phy_id); + phy_attached_info(slave->phy); + phy_start(slave->phy); /* Configure GMII_SEL register */ @@ -2050,7 +2050,8 @@ static int cpsw_probe_dt(struct cpsw_priv *priv, if (!phy_dev) return -ENODEV; snprintf(slave_data->phy_id, sizeof(slave_data->phy_id), - PHY_ID_FMT, phy_dev->bus->id, phy_dev->addr); + PHY_ID_FMT, phy_dev->mdio.bus->id, + phy_dev->mdio.addr); } else if (parp) { u32 phyid; struct device_node *mdio_node; diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c index 657b65bf5cac..18bf3a8fdc50 100644 --- a/drivers/net/ethernet/ti/davinci_cpdma.c +++ b/drivers/net/ethernet/ti/davinci_cpdma.c @@ -82,7 +82,7 @@ struct cpdma_desc { struct cpdma_desc_pool { phys_addr_t phys; - u32 hw_addr; + dma_addr_t hw_addr; void __iomem *iomap; /* ioremap map */ void *cpumap; /* dma_alloc map */ int desc_size, mem_size; @@ -152,7 +152,7 @@ struct cpdma_chan { * abstract out these details */ static struct cpdma_desc_pool * -cpdma_desc_pool_create(struct device *dev, u32 phys, u32 hw_addr, +cpdma_desc_pool_create(struct device *dev, u32 phys, dma_addr_t hw_addr, int size, int align) { int bitmap_size; @@ -176,13 +176,13 @@ cpdma_desc_pool_create(struct device *dev, u32 phys, u32 hw_addr, if (phys) { pool->phys = phys; - pool->iomap = ioremap(phys, size); + pool->iomap = ioremap(phys, size); /* should be memremap? 
*/ pool->hw_addr = hw_addr; } else { - pool->cpumap = dma_alloc_coherent(dev, size, &pool->phys, + pool->cpumap = dma_alloc_coherent(dev, size, &pool->hw_addr, GFP_KERNEL); - pool->iomap = pool->cpumap; - pool->hw_addr = pool->phys; + pool->iomap = (void __iomem __force *)pool->cpumap; + pool->phys = pool->hw_addr; /* assumes no IOMMU, don't use this value */ } if (pool->iomap) diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c index 33bd3b902304..5d9abedd6b75 100644 --- a/drivers/net/ethernet/ti/davinci_emac.c +++ b/drivers/net/ethernet/ti/davinci_emac.c @@ -1644,10 +1644,7 @@ static int emac_dev_open(struct net_device *ndev) priv->speed = 0; priv->duplex = ~0; - dev_info(emac_dev, "attached PHY driver [%s] " - "(mii_bus:phy_addr=%s, id=%x)\n", - priv->phydev->drv->name, dev_name(&priv->phydev->dev), - priv->phydev->phy_id); + phy_attached_info(priv->phydev); } if (!priv->phydev) { diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c index c00084d689f3..4e7c9b9b042a 100644 --- a/drivers/net/ethernet/ti/davinci_mdio.c +++ b/drivers/net/ethernet/ti/davinci_mdio.c @@ -393,10 +393,10 @@ static int davinci_mdio_probe(struct platform_device *pdev) /* scan and dump the bus */ for (addr = 0; addr < PHY_MAX_ADDR; addr++) { - phy = data->bus->phy_map[addr]; + phy = mdiobus_get_phy(data->bus, addr); if (phy) { dev_info(dev, "phy[%d]: device %s, driver %s\n", - phy->addr, dev_name(&phy->dev), + phy->mdio.addr, phydev_name(phy), phy->drv ? phy->drv->name : "unknown"); } } diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c index c61d66d38634..8586a2034019 100644 --- a/drivers/net/ethernet/ti/netcp_core.c +++ b/drivers/net/ethernet/ti/netcp_core.c @@ -1835,22 +1835,26 @@ static u16 netcp_select_queue(struct net_device *dev, struct sk_buff *skb, return 0; } -static int netcp_setup_tc(struct net_device *dev, u8 num_tc) +static int netcp_setup_tc(struct net_device *dev, u32 handle, __be16 proto, + struct tc_to_netdev *tc) { int i; /* setup tc must be called under rtnl lock */ ASSERT_RTNL(); + if (handle != TC_H_ROOT || tc->type != TC_SETUP_MQPRIO) + return -EINVAL; + /* Sanity-check the number of traffic classes requested */ if ((dev->real_num_tx_queues <= 1) || - (dev->real_num_tx_queues < num_tc)) + (dev->real_num_tx_queues < tc->tc)) return -EINVAL; /* Configure traffic class to queue mappings */ - if (num_tc) { - netdev_set_num_tc(dev, num_tc); - for (i = 0; i < num_tc; i++) + if (tc->tc) { + netdev_set_num_tc(dev, tc->tc); + for (i = 0; i < tc->tc; i++) netdev_set_tc_queue(dev, i, 1, i); } else { netdev_reset_tc(dev); } diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c index 4e70e7586a09..d543298d6750 100644 --- a/drivers/net/ethernet/ti/netcp_ethss.c +++ b/drivers/net/ethernet/ti/netcp_ethss.c @@ -2178,7 +2178,7 @@ static int gbe_slave_open(struct gbe_intf *gbe_intf) return -ENODEV; } dev_dbg(priv->dev, "phy found: id is: 0x%s\n", - dev_name(&slave->phy->dev)); + phydev_name(slave->phy)); phy_start(slave->phy); phy_read_status(slave->phy); } @@ -2681,7 +2681,7 @@ static void init_secondary_ports(struct gbe_priv *gbe_dev, slave->phy = NULL; } else { dev_dbg(dev, "phy found: id is: 0x%s\n", - dev_name(&slave->phy->dev)); + phydev_name(slave->phy)); phy_start(slave->phy); phy_read_status(slave->phy); } diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c index 45ac38d29ed8..54874783476a 100644 --- 
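The cpdma pool fix above stores the address returned by dma_alloc_coherent() in a dma_addr_t instead of funneling it through a u32 hw_addr; on systems where DMA addresses exceed 32 bits the old field silently truncated. A small demonstration of that truncation, with the type mocked for userspace:

#include <stdio.h>
#include <stdint.h>

typedef uint64_t dma_addr_t;	/* 64-bit on LPAE and 64-bit systems */

int main(void)
{
	dma_addr_t handle = 0x100000000ULL;		/* a valid DMA address */
	uint32_t old_hw_addr = (uint32_t)handle;	/* the former u32 field */

	printf("dma_addr_t handle = 0x%llx\n", (unsigned long long)handle);
	printf("stored in u32 = 0x%x (high bits lost)\n", old_hw_addr);
	return 0;
}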
a/drivers/net/ethernet/toshiba/tc35815.c +++ b/drivers/net/ethernet/toshiba/tc35815.c @@ -608,40 +608,25 @@ static void tc_handle_link_change(struct net_device *dev) static int tc_mii_probe(struct net_device *dev) { struct tc35815_local *lp = netdev_priv(dev); - struct phy_device *phydev = NULL; - int phy_addr; + struct phy_device *phydev; u32 dropmask; - /* find the first phy */ - for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) { - if (lp->mii_bus->phy_map[phy_addr]) { - if (phydev) { - printk(KERN_ERR "%s: multiple PHYs found\n", - dev->name); - return -EINVAL; - } - phydev = lp->mii_bus->phy_map[phy_addr]; - break; - } - } - + phydev = phy_find_first(lp->mii_bus); if (!phydev) { printk(KERN_ERR "%s: no PHY found\n", dev->name); return -ENODEV; } /* attach the mac to the phy */ - phydev = phy_connect(dev, dev_name(&phydev->dev), + phydev = phy_connect(dev, phydev_name(phydev), &tc_handle_link_change, lp->chiptype == TC35815_TX4939 ? PHY_INTERFACE_MODE_RMII : PHY_INTERFACE_MODE_MII); if (IS_ERR(phydev)) { printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name); return PTR_ERR(phydev); } - printk(KERN_INFO "%s: attached PHY driver [%s] " - "(mii_bus:phy_addr=%s, id=%x)\n", - dev->name, phydev->drv->name, dev_name(&phydev->dev), - phydev->phy_id); + + phy_attached_info(phydev); /* mask with MAC supported features */ phydev->supported &= PHY_BASIC_FEATURES; @@ -669,7 +654,6 @@ static int tc_mii_init(struct net_device *dev) { struct tc35815_local *lp = netdev_priv(dev); int err; - int i; lp->mii_bus = mdiobus_alloc(); if (lp->mii_bus == NULL) { @@ -684,18 +668,9 @@ static int tc_mii_init(struct net_device *dev) (lp->pci_dev->bus->number << 8) | lp->pci_dev->devfn); lp->mii_bus->priv = dev; lp->mii_bus->parent = &lp->pci_dev->dev; - lp->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); - if (!lp->mii_bus->irq) { - err = -ENOMEM; - goto err_out_free_mii_bus; - } - - for (i = 0; i < PHY_MAX_ADDR; i++) - lp->mii_bus->irq[i] = PHY_POLL; - err = mdiobus_register(lp->mii_bus); if (err) - goto err_out_free_mdio_irq; + goto err_out_free_mii_bus; err = tc_mii_probe(dev); if (err) goto err_out_unregister_bus; @@ -703,8 +678,6 @@ static int tc_mii_init(struct net_device *dev) err_out_unregister_bus: mdiobus_unregister(lp->mii_bus); -err_out_free_mdio_irq: - kfree(lp->mii_bus->irq); err_out_free_mii_bus: mdiobus_free(lp->mii_bus); err_out: @@ -882,7 +855,6 @@ static void tc35815_remove_one(struct pci_dev *pdev) phy_disconnect(lp->phy_dev); mdiobus_unregister(lp->mii_bus); - kfree(lp->mii_bus->irq); mdiobus_free(lp->mii_bus); unregister_netdev(dev); free_netdev(dev); diff --git a/drivers/net/ethernet/xilinx/ll_temac.h b/drivers/net/ethernet/xilinx/ll_temac.h index 522abe2ff25a..902457e43628 100644 --- a/drivers/net/ethernet/xilinx/ll_temac.h +++ b/drivers/net/ethernet/xilinx/ll_temac.h @@ -337,7 +337,6 @@ struct temac_local { /* MDIO bus data */ struct mii_bus *mii_bus; /* MII bus reference */ - int mdio_irqs[PHY_MAX_ADDR]; /* IRQs table for MDIO bus */ /* IO registers, dma functions and IRQs */ void __iomem *regs; diff --git a/drivers/net/ethernet/xilinx/ll_temac_mdio.c b/drivers/net/ethernet/xilinx/ll_temac_mdio.c index 415de1eaf641..7714aff78b7d 100644 --- a/drivers/net/ethernet/xilinx/ll_temac_mdio.c +++ b/drivers/net/ethernet/xilinx/ll_temac_mdio.c @@ -92,7 +92,6 @@ int temac_mdio_setup(struct temac_local *lp, struct device_node *np) bus->read = temac_mdio_read; bus->write = temac_mdio_write; bus->parent = lp->dev; - bus->irq = lp->mdio_irqs; /* preallocated IRQ table */ 
lp->mii_bus = bus; @@ -114,7 +113,6 @@ int temac_mdio_setup(struct temac_local *lp, struct device_node *np) void temac_mdio_teardown(struct temac_local *lp) { mdiobus_unregister(lp->mii_bus); - kfree(lp->mii_bus->irq); mdiobus_free(lp->mii_bus); lp->mii_bus = NULL; } diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h index 7cb9abac95c8..9ead4e269409 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet.h +++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h @@ -385,7 +385,6 @@ struct axidma_bd { * @phy_dev: Pointer to PHY device structure attached to the axienet_local * @phy_node: Pointer to device node structure * @mii_bus: Pointer to MII bus structure - * @mdio_irqs: IRQs table for MDIO bus required in mii_bus structure * @regs: Base address for the axienet_local device address space * @dma_regs: Base address for the axidma device address space * @dma_err_tasklet: Tasklet structure to process Axi DMA errors @@ -426,7 +425,6 @@ struct axienet_local { /* MDIO bus data */ struct mii_bus *mii_bus; /* MII bus reference */ - int mdio_irqs[PHY_MAX_ADDR]; /* IRQs table for MDIO bus */ /* IO registers, dma functions and IRQs */ void __iomem *regs; diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c index 507bbb0355c2..63307ea97846 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c @@ -212,7 +212,6 @@ issue: bus->read = axienet_mdio_read; bus->write = axienet_mdio_write; bus->parent = lp->dev; - bus->irq = lp->mdio_irqs; /* preallocated IRQ table */ lp->mii_bus = bus; ret = of_mdiobus_register(bus, np1); @@ -232,7 +231,6 @@ issue: void axienet_mdio_teardown(struct axienet_local *lp) { mdiobus_unregister(lp->mii_bus); - kfree(lp->mii_bus->irq); mdiobus_free(lp->mii_bus); lp->mii_bus = NULL; } diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c index cf468c87ce57..e324b3092380 100644 --- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c +++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c @@ -114,7 +114,6 @@ * @phy_dev: pointer to the PHY device * @phy_node: pointer to the PHY device node * @mii_bus: pointer to the MII bus - * @mdio_irqs: IRQs table for MDIO bus * @last_link: last link status * @has_mdio: indicates whether MDIO is included in the HW */ @@ -135,7 +134,6 @@ struct net_local { struct device_node *phy_node; struct mii_bus *mii_bus; - int mdio_irqs[PHY_MAX_ADDR]; int last_link; bool has_mdio; @@ -829,7 +827,7 @@ static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev) dev_info(dev, "MDIO of the phy is not registered yet\n"); else - put_device(&phydev->dev); + put_device(&phydev->mdio.dev); return 0; } @@ -852,7 +850,6 @@ static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev) bus->read = xemaclite_mdio_read; bus->write = xemaclite_mdio_write; bus->parent = dev; - bus->irq = lp->mdio_irqs; /* preallocated IRQ table */ lp->mii_bus = bus; @@ -1196,7 +1193,6 @@ static int xemaclite_of_remove(struct platform_device *of_dev) /* Un-register the mii_bus, if configured */ if (lp->has_mdio) { mdiobus_unregister(lp->mii_bus); - kfree(lp->mii_bus->irq); mdiobus_free(lp->mii_bus); lp->mii_bus = NULL; } diff --git a/drivers/net/fddi/defxx.c b/drivers/net/fddi/defxx.c index 7f975a2c8990..b0de8ecd7fe8 100644 --- a/drivers/net/fddi/defxx.c +++ b/drivers/net/fddi/defxx.c @@ -533,8 +533,8 @@ static int dfx_register(struct device *bdev) 
const char *print_name = dev_name(bdev); struct net_device *dev; DFX_board_t *bp; /* board pointer */ - resource_size_t bar_start[3]; /* pointers to ports */ - resource_size_t bar_len[3]; /* resource length */ + resource_size_t bar_start[3] = {0}; /* pointers to ports */ + resource_size_t bar_len[3] = {0}; /* resource length */ int alloc_size; /* total buffer size used */ struct resource *region; int err = 0; @@ -3697,8 +3697,8 @@ static void dfx_unregister(struct device *bdev) int dfx_bus_pci = dev_is_pci(bdev); int dfx_bus_tc = DFX_BUS_TC(bdev); int dfx_use_mmio = DFX_MMIO || dfx_bus_tc; - resource_size_t bar_start[3]; /* pointers to ports */ - resource_size_t bar_len[3]; /* resource lengths */ + resource_size_t bar_start[3] = {0}; /* pointers to ports */ + resource_size_t bar_len[3] = {0}; /* resource lengths */ int alloc_size; /* total buffer size used */ unregister_netdev(dev); diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index 24b077a32c1c..dfbe3ca687f7 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -72,6 +72,7 @@ struct geneve_dev { bool collect_md; struct gro_cells gro_cells; u32 flags; + struct dst_cache dst_cache; }; /* Geneve device flags */ @@ -109,6 +110,11 @@ static __be64 vni_to_tunnel_id(const __u8 *vni) #endif } +static sa_family_t geneve_get_sk_family(struct geneve_sock *gs) +{ + return gs->sock->sk->sk_family; +} + static struct geneve_dev *geneve_lookup(struct geneve_sock *gs, __be32 addr, u8 vni[]) { @@ -152,58 +158,60 @@ static inline struct genevehdr *geneve_hdr(const struct sk_buff *skb) return (struct genevehdr *)(udp_hdr(skb) + 1); } -/* geneve receive/decap routine */ -static void geneve_rx(struct geneve_sock *gs, struct sk_buff *skb) +static struct geneve_dev *geneve_lookup_skb(struct geneve_sock *gs, + struct sk_buff *skb) { - struct genevehdr *gnvh = geneve_hdr(skb); - struct metadata_dst *tun_dst = NULL; - struct geneve_dev *geneve = NULL; - struct pcpu_sw_netstats *stats; - struct iphdr *iph = NULL; + u8 *vni; __be32 addr; static u8 zero_vni[3]; - u8 *vni; - int err = 0; - sa_family_t sa_family; #if IS_ENABLED(CONFIG_IPV6) - struct ipv6hdr *ip6h = NULL; - struct in6_addr addr6; static struct in6_addr zero_addr6; #endif - sa_family = gs->sock->sk->sk_family; + if (geneve_get_sk_family(gs) == AF_INET) { + struct iphdr *iph; - if (sa_family == AF_INET) { iph = ip_hdr(skb); /* outer IP header... */ if (gs->collect_md) { vni = zero_vni; addr = 0; } else { - vni = gnvh->vni; - + vni = geneve_hdr(skb)->vni; addr = iph->saddr; } - geneve = geneve_lookup(gs, addr, vni); + return geneve_lookup(gs, addr, vni); #if IS_ENABLED(CONFIG_IPV6) - } else if (sa_family == AF_INET6) { + } else if (geneve_get_sk_family(gs) == AF_INET6) { + struct ipv6hdr *ip6h; + struct in6_addr addr6; + ip6h = ipv6_hdr(skb); /* outer IPv6 header... 
*/ if (gs->collect_md) { vni = zero_vni; addr6 = zero_addr6; } else { - vni = gnvh->vni; - + vni = geneve_hdr(skb)->vni; addr6 = ip6h->saddr; } - geneve = geneve6_lookup(gs, addr6, vni); + return geneve6_lookup(gs, addr6, vni); #endif } - if (!geneve) - goto drop; + return NULL; +} + +/* geneve receive/decap routine */ +static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs, + struct sk_buff *skb) +{ + struct genevehdr *gnvh = geneve_hdr(skb); + struct metadata_dst *tun_dst = NULL; + struct pcpu_sw_netstats *stats; + int err = 0; + void *oiph; if (ip_tunnel_collect_metadata() || gs->collect_md) { __be16 flags; @@ -212,7 +220,7 @@ static void geneve_rx(struct geneve_sock *gs, struct sk_buff *skb) (gnvh->oam ? TUNNEL_OAM : 0) | (gnvh->critical ? TUNNEL_CRIT_OPT : 0); - tun_dst = udp_tun_rx_dst(skb, sa_family, flags, + tun_dst = udp_tun_rx_dst(skb, geneve_get_sk_family(gs), flags, vni_to_tunnel_id(gnvh->vni), gnvh->opt_len * 4); if (!tun_dst) @@ -229,7 +237,6 @@ static void geneve_rx(struct geneve_sock *gs, struct sk_buff *skb) } skb_reset_mac_header(skb); - skb_scrub_packet(skb, !net_eq(geneve->net, dev_net(geneve->dev))); skb->protocol = eth_type_trans(skb, geneve->dev); skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN); @@ -240,25 +247,27 @@ static void geneve_rx(struct geneve_sock *gs, struct sk_buff *skb) if (ether_addr_equal(eth_hdr(skb)->h_source, geneve->dev->dev_addr)) goto drop; + oiph = skb_network_header(skb); skb_reset_network_header(skb); - if (iph) - err = IP_ECN_decapsulate(iph, skb); + if (geneve_get_sk_family(gs) == AF_INET) + err = IP_ECN_decapsulate(oiph, skb); #if IS_ENABLED(CONFIG_IPV6) - if (ip6h) - err = IP6_ECN_decapsulate(ip6h, skb); + else + err = IP6_ECN_decapsulate(oiph, skb); #endif if (unlikely(err)) { if (log_ecn_error) { - if (iph) + if (geneve_get_sk_family(gs) == AF_INET) net_info_ratelimited("non-ECT from %pI4 " "with TOS=%#x\n", - &iph->saddr, iph->tos); + &((struct iphdr *)oiph)->saddr, + ((struct iphdr *)oiph)->tos); #if IS_ENABLED(CONFIG_IPV6) - if (ip6h) + else net_info_ratelimited("non-ECT from %pI6\n", - &ip6h->saddr); + &((struct ipv6hdr *)oiph)->saddr); #endif } if (err > 1) { @@ -297,6 +306,13 @@ static int geneve_init(struct net_device *dev) return err; } + err = dst_cache_init(&geneve->dst_cache, GFP_KERNEL); + if (err) { + free_percpu(dev->tstats); + gro_cells_destroy(&geneve->gro_cells); + return err; + } + return 0; } @@ -304,6 +320,7 @@ static void geneve_uninit(struct net_device *dev) { struct geneve_dev *geneve = netdev_priv(dev); + dst_cache_destroy(&geneve->dst_cache); gro_cells_destroy(&geneve->gro_cells); free_percpu(dev->tstats); } @@ -312,6 +329,7 @@ static void geneve_uninit(struct net_device *dev) static int geneve_udp_encap_recv(struct sock *sk, struct sk_buff *skb) { struct genevehdr *geneveh; + struct geneve_dev *geneve; struct geneve_sock *gs; int opts_len; @@ -327,16 +345,21 @@ static int geneve_udp_encap_recv(struct sock *sk, struct sk_buff *skb) if (unlikely(geneveh->proto_type != htons(ETH_P_TEB))) goto error; - opts_len = geneveh->opt_len * 4; - if (iptunnel_pull_header(skb, GENEVE_BASE_HLEN + opts_len, - htons(ETH_P_TEB))) - goto drop; - gs = rcu_dereference_sk_user_data(sk); if (!gs) goto drop; - geneve_rx(gs, skb); + geneve = geneve_lookup_skb(gs, skb); + if (!geneve) + goto drop; + + opts_len = geneveh->opt_len * 4; + if (iptunnel_pull_header(skb, GENEVE_BASE_HLEN + opts_len, + htons(ETH_P_TEB), + !net_eq(geneve->net, dev_net(geneve->dev)))) + goto drop; + + geneve_rx(geneve, gs, skb); return 0; drop: @@ 
-383,12 +406,12 @@ static void geneve_notify_add_rx_port(struct geneve_sock *gs) struct net_device *dev; struct sock *sk = gs->sock->sk; struct net *net = sock_net(sk); - sa_family_t sa_family = sk->sk_family; + sa_family_t sa_family = geneve_get_sk_family(gs); __be16 port = inet_sk(sk)->inet_sport; int err; if (sa_family == AF_INET) { - err = udp_add_offload(&gs->udp_offloads); + err = udp_add_offload(sock_net(sk), &gs->udp_offloads); if (err) pr_warn("geneve: udp_add_offload failed with status %d\n", err); @@ -544,7 +567,7 @@ static void geneve_notify_del_rx_port(struct geneve_sock *gs) struct net_device *dev; struct sock *sk = gs->sock->sk; struct net *net = sock_net(sk); - sa_family_t sa_family = sk->sk_family; + sa_family_t sa_family = geneve_get_sk_family(gs); __be16 port = inet_sk(sk)->inet_sport; rcu_read_lock(); @@ -587,7 +610,7 @@ static struct geneve_sock *geneve_find_sock(struct geneve_net *gn, list_for_each_entry(gs, &gn->sock_list, list) { if (inet_sk(gs->sock->sk)->inet_sport == dst_port && - inet_sk(gs->sock->sk)->sk.sk_family == family) { + geneve_get_sk_family(gs) == family) { return gs; } } @@ -753,7 +776,9 @@ static struct rtable *geneve_get_v4_rt(struct sk_buff *skb, struct ip_tunnel_info *info) { struct geneve_dev *geneve = netdev_priv(dev); + struct dst_cache *dst_cache; struct rtable *rt = NULL; + bool use_cache = true; __u8 tos; memset(fl4, 0, sizeof(*fl4)); @@ -764,16 +789,26 @@ static struct rtable *geneve_get_v4_rt(struct sk_buff *skb, fl4->daddr = info->key.u.ipv4.dst; fl4->saddr = info->key.u.ipv4.src; fl4->flowi4_tos = RT_TOS(info->key.tos); + dst_cache = &info->dst_cache; } else { tos = geneve->tos; if (tos == 1) { const struct iphdr *iip = ip_hdr(skb); tos = ip_tunnel_get_dsfield(iip, skb); + use_cache = false; } fl4->flowi4_tos = RT_TOS(tos); fl4->daddr = geneve->remote.sin.sin_addr.s_addr; + dst_cache = &geneve->dst_cache; + } + + use_cache = use_cache && !skb->mark; + if (use_cache) { + rt = dst_cache_get_ip4(dst_cache, &fl4->saddr); + if (rt) + return rt; } rt = ip_route_output_key(geneve->net, fl4); @@ -786,6 +821,8 @@ static struct rtable *geneve_get_v4_rt(struct sk_buff *skb, ip_rt_put(rt); return ERR_PTR(-ELOOP); } + if (use_cache) + dst_cache_set_ip4(dst_cache, &rt->dst, fl4->saddr); return rt; } @@ -798,6 +835,8 @@ static struct dst_entry *geneve_get_v6_dst(struct sk_buff *skb, struct geneve_dev *geneve = netdev_priv(dev); struct geneve_sock *gs6 = geneve->sock6; struct dst_entry *dst = NULL; + struct dst_cache *dst_cache; + bool use_cache = true; __u8 prio; memset(fl6, 0, sizeof(*fl6)); @@ -808,16 +847,26 @@ static struct dst_entry *geneve_get_v6_dst(struct sk_buff *skb, fl6->daddr = info->key.u.ipv6.dst; fl6->saddr = info->key.u.ipv6.src; fl6->flowi6_tos = RT_TOS(info->key.tos); + dst_cache = &info->dst_cache; } else { prio = geneve->tos; if (prio == 1) { const struct iphdr *iip = ip_hdr(skb); prio = ip_tunnel_get_dsfield(iip, skb); + use_cache = false; } fl6->flowi6_tos = RT_TOS(prio); fl6->daddr = geneve->remote.sin6.sin6_addr; + dst_cache = &geneve->dst_cache; + } + + use_cache = use_cache && !skb->mark; + if (use_cache) { + dst = dst_cache_get_ip6(dst_cache, &fl6->saddr); + if (dst) + return dst; } if (ipv6_stub->ipv6_dst_lookup(geneve->net, gs6->sock->sk, &dst, fl6)) { @@ -830,6 +879,8 @@ static struct dst_entry *geneve_get_v6_dst(struct sk_buff *skb, return ERR_PTR(-ELOOP); } + if (use_cache) + dst_cache_set_ip6(dst_cache, dst, &fl6->saddr); return dst; } #endif @@ -980,9 +1031,9 @@ static netdev_tx_t geneve6_xmit_skb(struct sk_buff *skb, 
struct net_device *dev, opts = ip_tunnel_info_opts(info); if (key->tun_flags & TUNNEL_CSUM) - flags |= GENEVE_F_UDP_CSUM; + flags &= ~GENEVE_F_UDP_ZERO_CSUM6_TX; else - flags &= ~GENEVE_F_UDP_CSUM; + flags |= GENEVE_F_UDP_ZERO_CSUM6_TX; err = geneve6_build_skb(dst, skb, key->tun_flags, vni, info->options_len, opts, @@ -1272,6 +1323,8 @@ static int geneve_configure(struct net *net, struct net_device *dev, return -EPERM; } + dst_cache_reset(&geneve->dst_cache); + err = register_netdevice(dev); if (err) return err; diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c index 9f0b1c342b77..5a1e98547031 100644 --- a/drivers/net/hamradio/6pack.c +++ b/drivers/net/hamradio/6pack.c @@ -683,6 +683,12 @@ static void sixpack_close(struct tty_struct *tty) if (!atomic_dec_and_test(&sp->refcnt)) down(&sp->dead_sem); + /* We must stop the queue to avoid potentially scribbling + * on the free buffers. The sp->dead_sem is not sufficient + * to protect us from sp->xbuff access. + */ + netif_stop_queue(sp->dev); + del_timer_sync(&sp->tx_t); del_timer_sync(&sp->resync_t); diff --git a/drivers/net/hamradio/baycom_ser_fdx.c b/drivers/net/hamradio/baycom_ser_fdx.c index 636b65c66d49..7b916d5b14b9 100644 --- a/drivers/net/hamradio/baycom_ser_fdx.c +++ b/drivers/net/hamradio/baycom_ser_fdx.c @@ -80,6 +80,7 @@ #include <linux/hdlcdrv.h> #include <linux/baycom.h> #include <linux/jiffies.h> +#include <linux/time64.h> #include <asm/uaccess.h> #include <asm/io.h> @@ -228,14 +229,15 @@ static inline unsigned int hweight8(unsigned int w) /* --------------------------------------------------------------------- */ -static __inline__ void ser12_rx(struct net_device *dev, struct baycom_state *bc, struct timeval *tv, unsigned char curs) +static __inline__ void ser12_rx(struct net_device *dev, struct baycom_state *bc, struct timespec64 *ts, unsigned char curs) { int timediff; int bdus8 = bc->baud_us >> 3; int bdus4 = bc->baud_us >> 2; int bdus2 = bc->baud_us >> 1; - timediff = 1000000 + tv->tv_usec - bc->modem.ser12.pll_time; + timediff = 1000000 + ts->tv_nsec / NSEC_PER_USEC - + bc->modem.ser12.pll_time; while (timediff >= 500000) timediff -= 1000000; while (timediff >= bdus2) { @@ -287,7 +289,7 @@ static irqreturn_t ser12_interrupt(int irq, void *dev_id) { struct net_device *dev = (struct net_device *)dev_id; struct baycom_state *bc = netdev_priv(dev); - struct timeval tv; + struct timespec64 ts; unsigned char iir, msr; unsigned int txcount = 0; @@ -297,7 +299,7 @@ static irqreturn_t ser12_interrupt(int irq, void *dev_id) if ((iir = inb(IIR(dev->base_addr))) & 1) return IRQ_NONE; /* get current time */ - do_gettimeofday(&tv); + ktime_get_ts64(&ts); msr = inb(MSR(dev->base_addr)); /* delta DCD */ if ((msr & 8) && bc->opt_dcd) @@ -340,7 +342,7 @@ static irqreturn_t ser12_interrupt(int irq, void *dev_id) } iir = inb(IIR(dev->base_addr)); } while (!(iir & 1)); - ser12_rx(dev, bc, &tv, msr & 0x10); /* CTS */ + ser12_rx(dev, bc, &ts, msr & 0x10); /* CTS */ if (bc->modem.ptt && txcount) { if (bc->modem.ser12.txshreg <= 1) { bc->modem.ser12.txshreg = 0x10000 | hdlcdrv_getbits(&bc->hdrv); diff --git a/drivers/net/hamradio/dmascc.c b/drivers/net/hamradio/dmascc.c index c3d377770616..e4137c1b3df9 100644 --- a/drivers/net/hamradio/dmascc.c +++ b/drivers/net/hamradio/dmascc.c @@ -451,7 +451,7 @@ static const struct net_device_ops scc_netdev_ops = { static int __init setup_adapter(int card_base, int type, int n) { - int i, irq, chip; + int i, irq, chip, err; struct scc_info *info; struct net_device *dev; struct scc_priv 
*priv; @@ -463,14 +463,17 @@ static int __init setup_adapter(int card_base, int type, int n) /* Initialize what is necessary for write_scc and write_scc_data */ info = kzalloc(sizeof(struct scc_info), GFP_KERNEL | GFP_DMA); - if (!info) + if (!info) { + err = -ENOMEM; goto out; + } info->dev[0] = alloc_netdev(0, "", NET_NAME_UNKNOWN, dev_setup); if (!info->dev[0]) { printk(KERN_ERR "dmascc: " "could not allocate memory for %s at %#3x\n", hw[type].name, card_base); + err = -ENOMEM; goto out1; } @@ -479,6 +482,7 @@ static int __init setup_adapter(int card_base, int type, int n) printk(KERN_ERR "dmascc: " "could not allocate memory for %s at %#3x\n", hw[type].name, card_base); + err = -ENOMEM; goto out2; } spin_lock_init(&info->register_lock); @@ -549,6 +553,7 @@ static int __init setup_adapter(int card_base, int type, int n) printk(KERN_ERR "dmascc: could not find irq of %s at %#3x (irq=%d)\n", hw[type].name, card_base, irq); + err = -ENODEV; goto out3; } @@ -585,11 +590,13 @@ static int __init setup_adapter(int card_base, int type, int n) if (register_netdev(info->dev[0])) { printk(KERN_ERR "dmascc: could not register %s\n", info->dev[0]->name); + err = -ENODEV; goto out3; } if (register_netdev(info->dev[1])) { printk(KERN_ERR "dmascc: could not register %s\n", info->dev[1]->name); + err = -ENODEV; goto out4; } @@ -612,7 +619,7 @@ static int __init setup_adapter(int card_base, int type, int n) out1: kfree(info); out: - return -1; + return err; } diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c index 0b72b9de5207..85828f153445 100644 --- a/drivers/net/hamradio/mkiss.c +++ b/drivers/net/hamradio/mkiss.c @@ -797,6 +797,11 @@ static void mkiss_close(struct tty_struct *tty) */ if (!atomic_dec_and_test(&ax->refcnt)) down(&ax->dead_sem); + /* + * Halt the transmit queue so that a new transmit cannot scribble + * on our buffers + */ + netif_stop_queue(ax->dev); /* Free all AX25 frame buffers. */ kfree(ax->rbuff); diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index f4130af09244..fcb92c0d0eb9 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h @@ -624,6 +624,7 @@ struct nvsp_message { #define RNDIS_PKT_ALIGN_DEFAULT 8 struct multi_send_data { + struct sk_buff *skb; /* skb containing the pkt */ struct hv_netvsc_packet *pkt; /* netvsc pkt pending */ u32 count; /* counter of batched packets */ }; diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index 059fc5231601..ec313fc08d82 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -841,6 +841,18 @@ static inline int netvsc_send_pkt( return ret; } +/* Move packet out of multi send data (msd), and clear msd */ +static inline void move_pkt_msd(struct hv_netvsc_packet **msd_send, + struct sk_buff **msd_skb, + struct multi_send_data *msdp) +{ + *msd_skb = msdp->skb; + *msd_send = msdp->pkt; + msdp->skb = NULL; + msdp->pkt = NULL; + msdp->count = 0; +} + int netvsc_send(struct hv_device *device, struct hv_netvsc_packet *packet, struct rndis_message *rndis_msg, @@ -855,6 +867,7 @@ int netvsc_send(struct hv_device *device, unsigned int section_index = NETVSC_INVALID_INDEX; struct multi_send_data *msdp; struct hv_netvsc_packet *msd_send = NULL, *cur_send = NULL; + struct sk_buff *msd_skb = NULL; bool try_batch; bool xmit_more = (skb != NULL) ? 
skb->xmit_more : false; @@ -897,10 +910,8 @@ int netvsc_send(struct hv_device *device, net_device->send_section_size) { section_index = netvsc_get_next_send_section(net_device); if (section_index != NETVSC_INVALID_INDEX) { - msd_send = msdp->pkt; - msdp->pkt = NULL; - msdp->count = 0; - msd_len = 0; + move_pkt_msd(&msd_send, &msd_skb, msdp); + msd_len = 0; } } @@ -919,31 +930,31 @@ int netvsc_send(struct hv_device *device, packet->total_data_buflen += msd_len; } - if (msdp->pkt) - dev_kfree_skb_any(skb); + if (msdp->skb) + dev_kfree_skb_any(msdp->skb); if (xmit_more && !packet->cp_partial) { + msdp->skb = skb; msdp->pkt = packet; msdp->count++; } else { cur_send = packet; + msdp->skb = NULL; msdp->pkt = NULL; msdp->count = 0; } } else { - msd_send = msdp->pkt; - msdp->pkt = NULL; - msdp->count = 0; + move_pkt_msd(&msd_send, &msd_skb, msdp); cur_send = packet; } if (msd_send) { - m_ret = netvsc_send_pkt(msd_send, net_device, pb, skb); + m_ret = netvsc_send_pkt(msd_send, net_device, NULL, msd_skb); if (m_ret != 0) { netvsc_free_send_slot(net_device, msd_send->send_buf_index); - dev_kfree_skb_any(skb); + dev_kfree_skb_any(msd_skb); } } diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 1c8db9afdcda..c72e5b83afdb 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -43,6 +43,11 @@ #define RING_SIZE_MIN 64 #define LINKCHANGE_INT (2 * HZ) +#define NETVSC_HW_FEATURES (NETIF_F_RXCSUM | \ + NETIF_F_SG | \ + NETIF_F_TSO | \ + NETIF_F_TSO6 | \ + NETIF_F_HW_CSUM) static int ring_size = 128; module_param(ring_size, int, S_IRUGO); MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)"); @@ -196,65 +201,6 @@ static void *init_ppi_data(struct rndis_message *msg, u32 ppi_size, return ppi; } -union sub_key { - u64 k; - struct { - u8 pad[3]; - u8 kb; - u32 ka; - }; -}; - -/* Toeplitz hash function - * data: network byte order - * return: host byte order - */ -static u32 comp_hash(u8 *key, int klen, void *data, int dlen) -{ - union sub_key subk; - int k_next = 4; - u8 dt; - int i, j; - u32 ret = 0; - - subk.k = 0; - subk.ka = ntohl(*(u32 *)key); - - for (i = 0; i < dlen; i++) { - subk.kb = key[k_next]; - k_next = (k_next + 1) % klen; - dt = ((u8 *)data)[i]; - for (j = 0; j < 8; j++) { - if (dt & 0x80) - ret ^= subk.ka; - dt <<= 1; - subk.k <<= 1; - } - } - - return ret; -} - -static bool netvsc_set_hash(u32 *hash, struct sk_buff *skb) -{ - struct flow_keys flow; - int data_len; - - if (!skb_flow_dissect_flow_keys(skb, &flow, 0) || - !(flow.basic.n_proto == htons(ETH_P_IP) || - flow.basic.n_proto == htons(ETH_P_IPV6))) - return false; - - if (flow.basic.ip_proto == IPPROTO_TCP) - data_len = 12; - else - data_len = 8; - - *hash = comp_hash(netvsc_hash_key, HASH_KEYLEN, &flow, data_len); - - return true; -} - static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb, void *accel_priv, select_queue_fallback_t fallback) { @@ -267,11 +213,9 @@ static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb, if (nvsc_dev == NULL || ndev->real_num_tx_queues <= 1) return 0; - if (netvsc_set_hash(&hash, skb)) { - q_idx = nvsc_dev->send_table[hash % VRSS_SEND_TAB_SIZE] % - ndev->real_num_tx_queues; - skb_set_hash(skb, hash, PKT_HASH_TYPE_L3); - } + hash = skb_get_hash(skb); + q_idx = nvsc_dev->send_table[hash % VRSS_SEND_TAB_SIZE] % + ndev->real_num_tx_queues; if (!nvsc_dev->chn_table[q_idx]) q_idx = 0; @@ -1142,10 +1086,8 @@ static int netvsc_probe(struct hv_device *dev, net->netdev_ops = &device_ops; - 
net->hw_features = NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_IP_CSUM | - NETIF_F_TSO; - net->features = NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_SG | NETIF_F_RXCSUM | - NETIF_F_IP_CSUM | NETIF_F_TSO; + net->hw_features = NETVSC_HW_FEATURES; + net->features = NETVSC_HW_FEATURES | NETIF_F_HW_VLAN_CTAG_TX; net->ethtool_ops = &ethtool_ops; SET_NETDEV_DEV(net, &dev->device); diff --git a/drivers/net/ieee802154/cc2520.c b/drivers/net/ieee802154/cc2520.c index e65b60591317..d50add705a79 100644 --- a/drivers/net/ieee802154/cc2520.c +++ b/drivers/net/ieee802154/cc2520.c @@ -21,6 +21,8 @@ #include <linux/skbuff.h> #include <linux/of_gpio.h> #include <linux/ieee802154.h> +#include <linux/crc-ccitt.h> +#include <asm/unaligned.h> #include <net/mac802154.h> #include <net/cfg802154.h> @@ -189,6 +191,18 @@ #define CC2520_RXFIFOCNT 0x3E #define CC2520_TXFIFOCNT 0x3F +/* CC2520_FRMFILT0 */ +#define FRMFILT0_FRAME_FILTER_EN BIT(0) +#define FRMFILT0_PAN_COORDINATOR BIT(1) + +/* CC2520_FRMCTRL0 */ +#define FRMCTRL0_AUTOACK BIT(5) +#define FRMCTRL0_AUTOCRC BIT(6) + +/* CC2520_FRMCTRL1 */ +#define FRMCTRL1_SET_RXENMASK_ON_TX BIT(0) +#define FRMCTRL1_IGNORE_TX_UNDERF BIT(1) + /* Driver private information */ struct cc2520_private { struct spi_device *spi; /* SPI device structure */ @@ -201,6 +215,7 @@ struct cc2520_private { struct work_struct fifop_irqwork;/* Workqueue for FIFOP */ spinlock_t lock; /* Lock for is_tx*/ struct completion tx_complete; /* Work completion for Tx */ + bool promiscuous; /* Flag for promiscuous mode */ }; /* Generic Functions */ @@ -367,14 +382,14 @@ cc2520_read_register(struct cc2520_private *priv, u8 reg, u8 *data) } static int -cc2520_write_txfifo(struct cc2520_private *priv, u8 *data, u8 len) +cc2520_write_txfifo(struct cc2520_private *priv, u8 pkt_len, u8 *data, u8 len) { int status; /* length byte must include FCS even * if it is calculated in the hardware */ - int len_byte = len + 2; + int len_byte = pkt_len; struct spi_message msg; @@ -414,7 +429,7 @@ cc2520_write_txfifo(struct cc2520_private *priv, u8 *data, u8 len) } static int -cc2520_read_rxfifo(struct cc2520_private *priv, u8 *data, u8 len, u8 *lqi) +cc2520_read_rxfifo(struct cc2520_private *priv, u8 *data, u8 len) { int status; struct spi_message msg; @@ -470,12 +485,25 @@ cc2520_tx(struct ieee802154_hw *hw, struct sk_buff *skb) unsigned long flags; int rc; u8 status = 0; + u8 pkt_len; + + /* In promiscuous mode we disable AUTOCRC so we can get the raw CRC + * values on RX. This means we need to manually add the CRC on TX. + */ + if (priv->promiscuous) { + u16 crc = crc_ccitt(0, skb->data, skb->len); + + put_unaligned_le16(crc, skb_put(skb, 2)); + pkt_len = skb->len; + } else { + pkt_len = skb->len + 2; + } rc = cc2520_cmd_strobe(priv, CC2520_CMD_SFLUSHTX); if (rc) goto err_tx; - rc = cc2520_write_txfifo(priv, skb->data, skb->len); + rc = cc2520_write_txfifo(priv, pkt_len, skb->data, skb->len); if (rc) goto err_tx; @@ -518,22 +546,62 @@ static int cc2520_rx(struct cc2520_private *priv) u8 len = 0, lqi = 0, bytes = 1; struct sk_buff *skb; - cc2520_read_rxfifo(priv, &len, bytes, &lqi); + /* Read single length byte from the radio. */ + cc2520_read_rxfifo(priv, &len, bytes); - if (len < 2 || len > IEEE802154_MTU) - return -EINVAL; + if (!ieee802154_is_valid_psdu_len(len)) { + /* Corrupted frame received, clear frame buffer by + * reading entire buffer.
+ */ + dev_dbg(&priv->spi->dev, "corrupted frame received\n"); + len = IEEE802154_MTU; + } skb = dev_alloc_skb(len); if (!skb) return -ENOMEM; - if (cc2520_read_rxfifo(priv, skb_put(skb, len), len, &lqi)) { + if (cc2520_read_rxfifo(priv, skb_put(skb, len), len)) { dev_dbg(&priv->spi->dev, "frame reception failed\n"); kfree_skb(skb); return -EINVAL; } - skb_trim(skb, skb->len - 2); + /* In promiscuous mode, we configure the radio to include the + * CRC (AUTOCRC==0) and we pass on the packet unconditionally. If not + * in promiscuous mode, we check the CRC here, but leave the + * RSSI/LQI/CRC_OK bytes as they will get removed in the mac layer. + */ + if (!priv->promiscuous) { + bool crc_ok; + + /* Check if the CRC is valid. With AUTOCRC set, the most + * significant bit of the last byte returned from the CC2520 + * is CRC_OK flag. See section 20.3.4 of the datasheet. + */ + crc_ok = skb->data[len - 1] & BIT(7); + + /* If we failed CRC drop the packet in the driver layer. */ + if (!crc_ok) { + dev_dbg(&priv->spi->dev, "CRC check failed\n"); + kfree_skb(skb); + return -EINVAL; + } + + /* To calculate LQI, the lower 7 bits of the last byte (the + * correlation value provided by the radio) must be scaled to + * the range 0-255. According to section 20.6, the correlation + * value ranges from 50-110. Ideally this would be calibrated + * per hardware design, but we use roughly the datasheet values + * to get close enough while avoiding floating point. + */ + lqi = skb->data[len - 1] & 0x7f; + if (lqi < 50) + lqi = 50; + else if (lqi > 113) + lqi = 113; + lqi = (lqi - 50) * 4; + } ieee802154_rx_irqsafe(priv->hw, skb, lqi); @@ -619,14 +687,19 @@ cc2520_filter(struct ieee802154_hw *hw, } if (changed & IEEE802154_AFILT_PANC_CHANGED) { + u8 frmfilt0; + dev_vdbg(&priv->spi->dev, "cc2520_filter called for panc change\n"); + + cc2520_read_register(priv, CC2520_FRMFILT0, &frmfilt0); + if (filt->pan_coord) - ret = cc2520_write_register(priv, CC2520_FRMFILT0, - 0x02); + frmfilt0 |= FRMFILT0_PAN_COORDINATOR; else - ret = cc2520_write_register(priv, CC2520_FRMFILT0, - 0x00); + frmfilt0 &= ~FRMFILT0_PAN_COORDINATOR; + + ret = cc2520_write_register(priv, CC2520_FRMFILT0, frmfilt0); } return ret; @@ -723,6 +796,30 @@ cc2520_set_txpower(struct ieee802154_hw *hw, s32 mbm) return cc2520_cc2591_set_tx_power(priv, mbm); } +static int +cc2520_set_promiscuous_mode(struct ieee802154_hw *hw, bool on) +{ + struct cc2520_private *priv = hw->priv; + u8 frmfilt0; + + dev_dbg(&priv->spi->dev, "%s : mode %d\n", __func__, on); + + priv->promiscuous = on; + + cc2520_read_register(priv, CC2520_FRMFILT0, &frmfilt0); + + if (on) { + /* Disable automatic ACK, automatic CRC, and frame filtering. 
*/ + cc2520_write_register(priv, CC2520_FRMCTRL0, 0); + frmfilt0 &= ~FRMFILT0_FRAME_FILTER_EN; + } else { + cc2520_write_register(priv, CC2520_FRMCTRL0, FRMCTRL0_AUTOACK | + FRMCTRL0_AUTOCRC); + frmfilt0 |= FRMFILT0_FRAME_FILTER_EN; + } + return cc2520_write_register(priv, CC2520_FRMFILT0, frmfilt0); +} + static const struct ieee802154_ops cc2520_ops = { .owner = THIS_MODULE, .start = cc2520_start, @@ -732,6 +829,7 @@ static const struct ieee802154_ops cc2520_ops = { .set_channel = cc2520_set_channel, .set_hw_addr_filt = cc2520_filter, .set_txpower = cc2520_set_txpower, + .set_promiscuous_mode = cc2520_set_promiscuous_mode, }; static int cc2520_register(struct cc2520_private *priv) @@ -749,7 +847,8 @@ static int cc2520_register(struct cc2520_private *priv) /* We do support only 2.4 Ghz */ priv->hw->phy->supported.channels[0] = 0x7FFF800; - priv->hw->flags = IEEE802154_HW_OMIT_CKSUM | IEEE802154_HW_AFILT; + priv->hw->flags = IEEE802154_HW_TX_OMIT_CKSUM | IEEE802154_HW_AFILT | + IEEE802154_HW_PROMISCUOUS; priv->hw->phy->flags = WPAN_PHY_FLAG_TXPOWER; @@ -919,6 +1018,11 @@ static int cc2520_hw_init(struct cc2520_private *priv) } /* Registers default value: section 28.1 in Datasheet */ + + /* Set the CCA threshold to -50 dBm. This seems to have been copied + * from the TinyOS CC2520 driver and is much higher than the -84 dBm + * threshold suggested in the datasheet. + */ ret = cc2520_write_register(priv, CC2520_CCACTRL0, 0x1A); if (ret) goto err_ret; @@ -955,15 +1059,10 @@ static int cc2520_hw_init(struct cc2520_private *priv) if (ret) goto err_ret; - ret = cc2520_write_register(priv, CC2520_FRMCTRL0, 0x60); - if (ret) - goto err_ret; - - ret = cc2520_write_register(priv, CC2520_FRMCTRL1, 0x03); - if (ret) - goto err_ret; - - ret = cc2520_write_register(priv, CC2520_FRMFILT0, 0x00); + /* Configure registers correctly for this driver. 
*/ + ret = cc2520_write_register(priv, CC2520_FRMCTRL1, + FRMCTRL1_SET_RXENMASK_ON_TX | + FRMCTRL1_IGNORE_TX_UNDERF); if (ret) goto err_ret; diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c index f94392d07126..7a3b41468a55 100644 --- a/drivers/net/ipvlan/ipvlan_main.c +++ b/drivers/net/ipvlan/ipvlan_main.c @@ -468,6 +468,7 @@ static int ipvlan_link_new(struct net *src_net, struct net_device *dev, ipvlan->dev = dev; ipvlan->port = port; ipvlan->sfeatures = IPVLAN_FEATURES; + ipvlan_adjust_mtu(ipvlan, phy_dev); INIT_LIST_HEAD(&ipvlan->addrs); /* TODO Probably put random address here to be presented to the diff --git a/drivers/net/irda/bfin_sir.h b/drivers/net/irda/bfin_sir.h index 29cbde8501ed..d47cf14bb4a5 100644 --- a/drivers/net/irda/bfin_sir.h +++ b/drivers/net/irda/bfin_sir.h @@ -82,9 +82,6 @@ struct bfin_sir_self { #define DRIVER_NAME "bfin_sir" -#define port_membase(port) (((struct bfin_sir_port *)(port))->membase) -#define get_lsr_cache(port) (((struct bfin_sir_port *)(port))->lsr) -#define put_lsr_cache(port, v) (((struct bfin_sir_port *)(port))->lsr = (v)) #include <asm/bfin_serial.h> static const unsigned short per[][4] = { diff --git a/drivers/net/irda/toim3232-sir.c b/drivers/net/irda/toim3232-sir.c index 6d2f55959c49..b977d6d33e74 100644 --- a/drivers/net/irda/toim3232-sir.c +++ b/drivers/net/irda/toim3232-sir.c @@ -130,16 +130,6 @@ static int toim3232delay = 150; /* default is 150 ms */ module_param(toim3232delay, int, 0); MODULE_PARM_DESC(toim3232delay, "toim3232 dongle write complete delay"); -#if 0 -static int toim3232flipdtr = 0; /* default is DTR high to reset */ -module_param(toim3232flipdtr, int, 0); -MODULE_PARM_DESC(toim3232flipdtr, "toim3232 dongle invert DTR (Reset)"); - -static int toim3232fliprts = 0; /* default is RTS high for baud change */ -module_param(toim3232fliptrs, int, 0); -MODULE_PARM_DESC(toim3232fliprts, "toim3232 dongle invert RTS (BR/D)"); -#endif - static int toim3232_open(struct sir_dev *); static int toim3232_close(struct sir_dev *); static int toim3232_change_speed(struct sir_dev *, unsigned); diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 6a57a005e0ca..426a2cc27ac8 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -1069,7 +1069,7 @@ EXPORT_SYMBOL_GPL(macvlan_common_setup); static void macvlan_setup(struct net_device *dev) { macvlan_common_setup(dev); - dev->tx_queue_len = 0; + dev->priv_flags |= IFF_NO_QUEUE; } static int macvlan_port_create(struct net_device *dev) @@ -1323,6 +1323,7 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev, list_add_tail_rcu(&vlan->list, &port->vlans); netif_stacked_transfer_operstate(lowerdev, dev); + linkwatch_fire_event(dev); return 0; @@ -1522,6 +1523,7 @@ static int macvlan_device_event(struct notifier_block *unused, port = macvlan_port_get_rtnl(dev); switch (event) { + case NETDEV_UP: case NETDEV_CHANGE: list_for_each_entry(vlan, &port->vlans, list) netif_stacked_transfer_operstate(vlan->lowerdev, diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index 60994a83a0d6..f0a77020037a 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -186,6 +186,7 @@ config MDIO_GPIO config MDIO_OCTEON tristate "Support for MDIO buses on Octeon and ThunderX SOCs" depends on 64BIT + depends on HAS_IOMEM help This module provides a driver for the Octeon and ThunderX MDIO diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile index f31a4e25cf15..680e88f9915a 100644 --- a/drivers/net/phy/Makefile +++ 
b/drivers/net/phy/Makefile @@ -1,6 +1,6 @@ # Makefile for Linux PHY drivers -libphy-objs := phy.o phy_device.o mdio_bus.o +libphy-objs := phy.o phy_device.o mdio_bus.o mdio_device.o obj-$(CONFIG_PHYLIB) += libphy.o obj-$(CONFIG_AQUANTIA_PHY) += aquantia.o diff --git a/drivers/net/phy/amd.c b/drivers/net/phy/amd.c index 65a488f82eb8..18141c022b13 100644 --- a/drivers/net/phy/amd.c +++ b/drivers/net/phy/amd.c @@ -72,7 +72,6 @@ static struct phy_driver am79c_driver[] = { { .read_status = genphy_read_status, .ack_interrupt = am79c_ack_interrupt, .config_intr = am79c_config_intr, - .driver = { .owner = THIS_MODULE,}, } }; module_phy_driver(am79c_driver); diff --git a/drivers/net/phy/aquantia.c b/drivers/net/phy/aquantia.c index f1936b7a7af6..09b0b0aa8d68 100644 --- a/drivers/net/phy/aquantia.c +++ b/drivers/net/phy/aquantia.c @@ -128,7 +128,6 @@ static struct phy_driver aquantia_driver[] = { .config_intr = aquantia_config_intr, .ack_interrupt = aquantia_ack_interrupt, .read_status = aquantia_read_status, - .driver = { .owner = THIS_MODULE,}, }, { .phy_id = PHY_ID_AQ2104, @@ -141,7 +140,6 @@ static struct phy_driver aquantia_driver[] = { .config_intr = aquantia_config_intr, .ack_interrupt = aquantia_ack_interrupt, .read_status = aquantia_read_status, - .driver = { .owner = THIS_MODULE,}, }, { .phy_id = PHY_ID_AQR105, @@ -154,7 +152,6 @@ static struct phy_driver aquantia_driver[] = { .config_intr = aquantia_config_intr, .ack_interrupt = aquantia_ack_interrupt, .read_status = aquantia_read_status, - .driver = { .owner = THIS_MODULE,}, }, { .phy_id = PHY_ID_AQR405, @@ -167,7 +164,6 @@ static struct phy_driver aquantia_driver[] = { .config_intr = aquantia_config_intr, .ack_interrupt = aquantia_ack_interrupt, .read_status = aquantia_read_status, - .driver = { .owner = THIS_MODULE,}, }, }; diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c index 2d020a3ec0b5..2174ec937b4d 100644 --- a/drivers/net/phy/at803x.c +++ b/drivers/net/phy/at803x.c @@ -20,10 +20,21 @@ #include <linux/gpio/consumer.h> #define AT803X_INTR_ENABLE 0x12 +#define AT803X_INTR_ENABLE_AUTONEG_ERR BIT(15) +#define AT803X_INTR_ENABLE_SPEED_CHANGED BIT(14) +#define AT803X_INTR_ENABLE_DUPLEX_CHANGED BIT(13) +#define AT803X_INTR_ENABLE_PAGE_RECEIVED BIT(12) +#define AT803X_INTR_ENABLE_LINK_FAIL BIT(11) +#define AT803X_INTR_ENABLE_LINK_SUCCESS BIT(10) +#define AT803X_INTR_ENABLE_WIRESPEED_DOWNGRADE BIT(5) +#define AT803X_INTR_ENABLE_POLARITY_CHANGED BIT(1) +#define AT803X_INTR_ENABLE_WOL BIT(0) + #define AT803X_INTR_STATUS 0x13 + #define AT803X_SMART_SPEED 0x14 #define AT803X_LED_CONTROL 0x18 -#define AT803X_WOL_ENABLE 0x01 + #define AT803X_DEVICE_ADDR 0x03 #define AT803X_LOC_MAC_ADDR_0_15_OFFSET 0x804C #define AT803X_LOC_MAC_ADDR_16_31_OFFSET 0x804B @@ -31,13 +42,15 @@ #define AT803X_MMD_ACCESS_CONTROL 0x0D #define AT803X_MMD_ACCESS_CONTROL_DATA 0x0E #define AT803X_FUNC_DATA 0x4003 -#define AT803X_INER 0x0012 -#define AT803X_INER_INIT 0xec00 -#define AT803X_INSR 0x0013 + #define AT803X_DEBUG_ADDR 0x1D #define AT803X_DEBUG_DATA 0x1E -#define AT803X_DEBUG_SYSTEM_MODE_CTRL 0x05 -#define AT803X_DEBUG_RGMII_TX_CLK_DLY BIT(8) + +#define AT803X_DEBUG_REG_0 0x00 +#define AT803X_DEBUG_RX_CLK_DLY_EN BIT(15) + +#define AT803X_DEBUG_REG_5 0x05 +#define AT803X_DEBUG_TX_CLK_DLY_EN BIT(8) #define ATH8030_PHY_ID 0x004dd076 #define ATH8031_PHY_ID 0x004dd074 @@ -61,6 +74,46 @@ struct at803x_context { u16 led_control; }; +static int at803x_debug_reg_read(struct phy_device *phydev, u16 reg) +{ + int ret; + + ret = phy_write(phydev, 
AT803X_DEBUG_ADDR, reg); + if (ret < 0) + return ret; + + return phy_read(phydev, AT803X_DEBUG_DATA); +} + +static int at803x_debug_reg_mask(struct phy_device *phydev, u16 reg, + u16 clear, u16 set) +{ + u16 val; + int ret; + + ret = at803x_debug_reg_read(phydev, reg); + if (ret < 0) + return ret; + + val = ret & 0xffff; + val &= ~clear; + val |= set; + + return phy_write(phydev, AT803X_DEBUG_DATA, val); +} + +static inline int at803x_enable_rx_delay(struct phy_device *phydev) +{ + return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_0, 0, + AT803X_DEBUG_RX_CLK_DLY_EN); +} + +static inline int at803x_enable_tx_delay(struct phy_device *phydev) +{ + return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_5, 0, + AT803X_DEBUG_TX_CLK_DLY_EN); +} + /* save relevant PHY registers to private copy */ static void at803x_context_save(struct phy_device *phydev, struct at803x_context *context) @@ -119,14 +172,14 @@ static int at803x_set_wol(struct phy_device *phydev, } value = phy_read(phydev, AT803X_INTR_ENABLE); - value |= AT803X_WOL_ENABLE; + value |= AT803X_INTR_ENABLE_WOL; ret = phy_write(phydev, AT803X_INTR_ENABLE, value); if (ret) return ret; value = phy_read(phydev, AT803X_INTR_STATUS); } else { value = phy_read(phydev, AT803X_INTR_ENABLE); - value &= (~AT803X_WOL_ENABLE); + value &= (~AT803X_INTR_ENABLE_WOL); ret = phy_write(phydev, AT803X_INTR_ENABLE, value); if (ret) return ret; @@ -145,7 +198,7 @@ static void at803x_get_wol(struct phy_device *phydev, wol->wolopts = 0; value = phy_read(phydev, AT803X_INTR_ENABLE); - if (value & AT803X_WOL_ENABLE) + if (value & AT803X_INTR_ENABLE_WOL) wol->wolopts |= WAKE_MAGIC; } @@ -157,7 +210,7 @@ static int at803x_suspend(struct phy_device *phydev) mutex_lock(&phydev->lock); value = phy_read(phydev, AT803X_INTR_ENABLE); - wol_enabled = value & AT803X_WOL_ENABLE; + wol_enabled = value & AT803X_INTR_ENABLE_WOL; value = phy_read(phydev, MII_BMCR); @@ -190,7 +243,7 @@ static int at803x_resume(struct phy_device *phydev) static int at803x_probe(struct phy_device *phydev) { - struct device *dev = &phydev->dev; + struct device *dev = &phydev->mdio.dev; struct at803x_priv *priv; struct gpio_desc *gpiod_reset; @@ -217,14 +270,17 @@ static int at803x_config_init(struct phy_device *phydev) if (ret < 0) return ret; - if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) { - ret = phy_write(phydev, AT803X_DEBUG_ADDR, - AT803X_DEBUG_SYSTEM_MODE_CTRL); - if (ret) + if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID || + phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) { + ret = at803x_enable_rx_delay(phydev); + if (ret < 0) return ret; - ret = phy_write(phydev, AT803X_DEBUG_DATA, - AT803X_DEBUG_RGMII_TX_CLK_DLY); - if (ret) + } + + if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID || + phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) { + ret = at803x_enable_tx_delay(phydev); + if (ret < 0) return ret; } @@ -235,7 +291,7 @@ static int at803x_ack_interrupt(struct phy_device *phydev) { int err; - err = phy_read(phydev, AT803X_INSR); + err = phy_read(phydev, AT803X_INTR_STATUS); return (err < 0) ? 
err : 0; } @@ -245,13 +301,19 @@ static int at803x_config_intr(struct phy_device *phydev) int err; int value; - value = phy_read(phydev, AT803X_INER); + value = phy_read(phydev, AT803X_INTR_ENABLE); + + if (phydev->interrupts == PHY_INTERRUPT_ENABLED) { + value |= AT803X_INTR_ENABLE_AUTONEG_ERR; + value |= AT803X_INTR_ENABLE_SPEED_CHANGED; + value |= AT803X_INTR_ENABLE_DUPLEX_CHANGED; + value |= AT803X_INTR_ENABLE_LINK_FAIL; + value |= AT803X_INTR_ENABLE_LINK_SUCCESS; - if (phydev->interrupts == PHY_INTERRUPT_ENABLED) - err = phy_write(phydev, AT803X_INER, - value | AT803X_INER_INIT); + err = phy_write(phydev, AT803X_INTR_ENABLE, value); + } else - err = phy_write(phydev, AT803X_INER, 0); + err = phy_write(phydev, AT803X_INTR_ENABLE, 0); return err; } @@ -281,8 +343,8 @@ static void at803x_link_change_notify(struct phy_device *phydev) at803x_context_restore(phydev, &context); - dev_dbg(&phydev->dev, "%s(): phy was reset\n", - __func__); + phydev_dbg(phydev, "%s(): phy was reset\n", + __func__); priv->phy_reset = true; } } else { @@ -310,9 +372,6 @@ static struct phy_driver at803x_driver[] = { .read_status = genphy_read_status, .ack_interrupt = at803x_ack_interrupt, .config_intr = at803x_config_intr, - .driver = { - .owner = THIS_MODULE, - }, }, { /* ATHEROS 8030 */ .phy_id = ATH8030_PHY_ID, @@ -325,15 +384,12 @@ static struct phy_driver at803x_driver[] = { .get_wol = at803x_get_wol, .suspend = at803x_suspend, .resume = at803x_resume, - .features = PHY_GBIT_FEATURES, + .features = PHY_BASIC_FEATURES, .flags = PHY_HAS_INTERRUPT, .config_aneg = genphy_config_aneg, .read_status = genphy_read_status, .ack_interrupt = at803x_ack_interrupt, .config_intr = at803x_config_intr, - .driver = { - .owner = THIS_MODULE, - }, }, { /* ATHEROS 8031 */ .phy_id = ATH8031_PHY_ID, @@ -352,9 +408,6 @@ static struct phy_driver at803x_driver[] = { .read_status = genphy_read_status, .ack_interrupt = &at803x_ack_interrupt, .config_intr = &at803x_config_intr, - .driver = { - .owner = THIS_MODULE, - }, } }; module_phy_driver(at803x_driver); diff --git a/drivers/net/phy/bcm-phy-lib.c b/drivers/net/phy/bcm-phy-lib.c index ddb377e53633..df0416db0b88 100644 --- a/drivers/net/phy/bcm-phy-lib.c +++ b/drivers/net/phy/bcm-phy-lib.c @@ -184,25 +184,25 @@ int bcm_phy_enable_eee(struct phy_device *phydev) /* Enable EEE at PHY level */ val = phy_read_mmd_indirect(phydev, BRCM_CL45VEN_EEE_CONTROL, - MDIO_MMD_AN, phydev->addr); + MDIO_MMD_AN); if (val < 0) return val; val |= LPI_FEATURE_EN | LPI_FEATURE_EN_DIG1000X; phy_write_mmd_indirect(phydev, BRCM_CL45VEN_EEE_CONTROL, - MDIO_MMD_AN, phydev->addr, (u32)val); + MDIO_MMD_AN, (u32)val); /* Advertise EEE */ val = phy_read_mmd_indirect(phydev, BCM_CL45VEN_EEE_ADV, - MDIO_MMD_AN, phydev->addr); + MDIO_MMD_AN); if (val < 0) return val; val |= (MDIO_AN_EEE_ADV_100TX | MDIO_AN_EEE_ADV_1000T); phy_write_mmd_indirect(phydev, BCM_CL45VEN_EEE_ADV, - MDIO_MMD_AN, phydev->addr, (u32)val); + MDIO_MMD_AN, (u32)val); return 0; } diff --git a/drivers/net/phy/bcm63xx.c b/drivers/net/phy/bcm63xx.c index 86b28052bf06..e741bf614c4e 100644 --- a/drivers/net/phy/bcm63xx.c +++ b/drivers/net/phy/bcm63xx.c @@ -56,7 +56,6 @@ static struct phy_driver bcm63xx_driver[] = { .read_status = genphy_read_status, .ack_interrupt = bcm_phy_ack_intr, .config_intr = bcm_phy_config_intr, - .driver = { .owner = THIS_MODULE }, }, { /* same phy as above, with just a different OUI */ .phy_id = 0x002bdc00, @@ -69,7 +68,6 @@ static struct phy_driver bcm63xx_driver[] = { .read_status = genphy_read_status, .ack_interrupt = 
bcm_phy_ack_intr, .config_intr = bcm_phy_config_intr, - .driver = { .owner = THIS_MODULE }, } }; module_phy_driver(bcm63xx_driver); diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c index d4083c381cd1..9b311041ebfb 100644 --- a/drivers/net/phy/bcm7xxx.c +++ b/drivers/net/phy/bcm7xxx.c @@ -24,7 +24,7 @@ #define MII_BCM7XXX_100TX_FALSE_CAR 0x13 #define MII_BCM7XXX_100TX_DISC 0x14 #define MII_BCM7XXX_AUX_MODE 0x1d -#define MII_BCM7XX_64CLK_MDIO BIT(12) +#define MII_BCM7XXX_64CLK_MDIO BIT(12) #define MII_BCM7XXX_TEST 0x1f #define MII_BCM7XXX_SHD_MODE_2 BIT(2) @@ -170,7 +170,7 @@ static int bcm7xxx_28nm_config_init(struct phy_device *phydev) int ret = 0; pr_info_once("%s: %s PHY revision: 0x%02x, patch: %d\n", - dev_name(&phydev->dev), phydev->drv->name, rev, patch); + phydev_name(phydev), phydev->drv->name, rev, patch); /* Dummy read to a register to workaround an issue upon reset where the * internal inverter may not allow the first MDIO transaction to pass @@ -247,7 +247,7 @@ static int bcm7xxx_config_init(struct phy_device *phydev) int ret; /* Enable 64 clock MDIO */ - phy_write(phydev, MII_BCM7XXX_AUX_MODE, MII_BCM7XX_64CLK_MDIO); + phy_write(phydev, MII_BCM7XXX_AUX_MODE, MII_BCM7XXX_64CLK_MDIO); phy_read(phydev, MII_BCM7XXX_AUX_MODE); /* Workaround only required for 100Mbits/sec capable PHYs */ @@ -324,7 +324,21 @@ static int bcm7xxx_dummy_config_init(struct phy_device *phydev) .config_aneg = genphy_config_aneg, \ .read_status = genphy_read_status, \ .resume = bcm7xxx_28nm_resume, \ - .driver = { .owner = THIS_MODULE }, \ +} + +#define BCM7XXX_40NM_EPHY(_oui, _name) \ +{ \ + .phy_id = (_oui), \ + .phy_id_mask = 0xfffffff0, \ + .name = _name, \ + .features = PHY_BASIC_FEATURES | \ + SUPPORTED_Pause | SUPPORTED_Asym_Pause, \ + .flags = PHY_IS_INTERNAL, \ + .config_init = bcm7xxx_config_init, \ + .config_aneg = genphy_config_aneg, \ + .read_status = genphy_read_status, \ + .suspend = bcm7xxx_suspend, \ + .resume = bcm7xxx_config_init, \ } static struct phy_driver bcm7xxx_driver[] = { @@ -334,46 +348,10 @@ static struct phy_driver bcm7xxx_driver[] = { BCM7XXX_28NM_GPHY(PHY_ID_BCM7439, "Broadcom BCM7439"), BCM7XXX_28NM_GPHY(PHY_ID_BCM7439_2, "Broadcom BCM7439 (2)"), BCM7XXX_28NM_GPHY(PHY_ID_BCM7445, "Broadcom BCM7445"), + BCM7XXX_40NM_EPHY(PHY_ID_BCM7425, "Broadcom BCM7425"), + BCM7XXX_40NM_EPHY(PHY_ID_BCM7429, "Broadcom BCM7429"), + BCM7XXX_40NM_EPHY(PHY_ID_BCM7435, "Broadcom BCM7435"), { - .phy_id = PHY_ID_BCM7425, - .phy_id_mask = 0xfffffff0, - .name = "Broadcom BCM7425", - .features = PHY_GBIT_FEATURES | - SUPPORTED_Pause | SUPPORTED_Asym_Pause, - .flags = PHY_IS_INTERNAL, - .config_init = bcm7xxx_config_init, - .config_aneg = genphy_config_aneg, - .read_status = genphy_read_status, - .suspend = bcm7xxx_suspend, - .resume = bcm7xxx_config_init, - .driver = { .owner = THIS_MODULE }, -}, { - .phy_id = PHY_ID_BCM7429, - .phy_id_mask = 0xfffffff0, - .name = "Broadcom BCM7429", - .features = PHY_GBIT_FEATURES | - SUPPORTED_Pause | SUPPORTED_Asym_Pause, - .flags = PHY_IS_INTERNAL, - .config_init = bcm7xxx_config_init, - .config_aneg = genphy_config_aneg, - .read_status = genphy_read_status, - .suspend = bcm7xxx_suspend, - .resume = bcm7xxx_config_init, - .driver = { .owner = THIS_MODULE }, -}, { - .phy_id = PHY_ID_BCM7435, - .phy_id_mask = 0xfffffff0, - .name = "Broadcom BCM7435", - .features = PHY_GBIT_FEATURES | - SUPPORTED_Pause | SUPPORTED_Asym_Pause, - .flags = PHY_IS_INTERNAL, - .config_init = bcm7xxx_config_init, - .config_aneg = genphy_config_aneg, - .read_status = 
genphy_read_status, - .suspend = bcm7xxx_suspend, - .resume = bcm7xxx_config_init, - .driver = { .owner = THIS_MODULE }, -}, { .phy_id = PHY_BCM_OUI_4, .phy_id_mask = 0xffff0000, .name = "Broadcom BCM7XXX 40nm", @@ -385,7 +363,6 @@ static struct phy_driver bcm7xxx_driver[] = { .read_status = genphy_read_status, .suspend = bcm7xxx_suspend, .resume = bcm7xxx_config_init, - .driver = { .owner = THIS_MODULE }, }, { .phy_id = PHY_BCM_OUI_5, .phy_id_mask = 0xffffff00, @@ -398,7 +375,6 @@ static struct phy_driver bcm7xxx_driver[] = { .read_status = genphy_read_status, .suspend = bcm7xxx_suspend, .resume = bcm7xxx_config_init, - .driver = { .owner = THIS_MODULE }, } }; static struct mdio_device_id __maybe_unused bcm7xxx_tbl[] = { diff --git a/drivers/net/phy/bcm87xx.c b/drivers/net/phy/bcm87xx.c index 1eca20452f03..f7ebdcff53e4 100644 --- a/drivers/net/phy/bcm87xx.c +++ b/drivers/net/phy/bcm87xx.c @@ -40,10 +40,10 @@ static int bcm87xx_of_reg_init(struct phy_device *phydev) const __be32 *paddr_end; int len, ret; - if (!phydev->dev.of_node) + if (!phydev->mdio.dev.of_node) return 0; - paddr = of_get_property(phydev->dev.of_node, + paddr = of_get_property(phydev->mdio.dev.of_node, "broadcom,c45-reg-init", &len); if (!paddr) return 0; @@ -163,8 +163,9 @@ static int bcm87xx_did_interrupt(struct phy_device *phydev) reg = phy_read(phydev, BCM87XX_LASI_STATUS); if (reg < 0) { - dev_err(&phydev->dev, - "Error: Read of BCM87XX_LASI_STATUS failed: %d\n", reg); + phydev_err(phydev, + "Error: Read of BCM87XX_LASI_STATUS failed: %d\n", + reg); return 0; } return (reg & 1) != 0; @@ -200,7 +201,6 @@ static struct phy_driver bcm87xx_driver[] = { .config_intr = bcm87xx_config_intr, .did_interrupt = bcm87xx_did_interrupt, .match_phy_device = bcm8706_match_phy_device, - .driver = { .owner = THIS_MODULE }, }, { .phy_id = PHY_ID_BCM8727, .phy_id_mask = 0xffffffff, @@ -213,7 +213,6 @@ static struct phy_driver bcm87xx_driver[] = { .config_intr = bcm87xx_config_intr, .did_interrupt = bcm87xx_did_interrupt, .match_phy_device = bcm8727_match_phy_device, - .driver = { .owner = THIS_MODULE }, } }; module_phy_driver(bcm87xx_driver); diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c index 3ce5d9514623..870327efccf7 100644 --- a/drivers/net/phy/broadcom.c +++ b/drivers/net/phy/broadcom.c @@ -460,7 +460,6 @@ static struct phy_driver broadcom_drivers[] = { .read_status = genphy_read_status, .ack_interrupt = bcm_phy_ack_intr, .config_intr = bcm_phy_config_intr, - .driver = { .owner = THIS_MODULE }, }, { .phy_id = PHY_ID_BCM5421, .phy_id_mask = 0xfffffff0, @@ -473,7 +472,6 @@ static struct phy_driver broadcom_drivers[] = { .read_status = genphy_read_status, .ack_interrupt = bcm_phy_ack_intr, .config_intr = bcm_phy_config_intr, - .driver = { .owner = THIS_MODULE }, }, { .phy_id = PHY_ID_BCM5461, .phy_id_mask = 0xfffffff0, @@ -486,7 +484,6 @@ static struct phy_driver broadcom_drivers[] = { .read_status = genphy_read_status, .ack_interrupt = bcm_phy_ack_intr, .config_intr = bcm_phy_config_intr, - .driver = { .owner = THIS_MODULE }, }, { .phy_id = PHY_ID_BCM54616S, .phy_id_mask = 0xfffffff0, @@ -499,7 +496,6 @@ static struct phy_driver broadcom_drivers[] = { .read_status = genphy_read_status, .ack_interrupt = bcm_phy_ack_intr, .config_intr = bcm_phy_config_intr, - .driver = { .owner = THIS_MODULE }, }, { .phy_id = PHY_ID_BCM5464, .phy_id_mask = 0xfffffff0, @@ -512,7 +508,6 @@ static struct phy_driver broadcom_drivers[] = { .read_status = genphy_read_status, .ack_interrupt = bcm_phy_ack_intr, .config_intr = 
bcm_phy_config_intr, - .driver = { .owner = THIS_MODULE }, }, { .phy_id = PHY_ID_BCM5481, .phy_id_mask = 0xfffffff0, @@ -525,7 +520,6 @@ static struct phy_driver broadcom_drivers[] = { .read_status = genphy_read_status, .ack_interrupt = bcm_phy_ack_intr, .config_intr = bcm_phy_config_intr, - .driver = { .owner = THIS_MODULE }, }, { .phy_id = PHY_ID_BCM5482, .phy_id_mask = 0xfffffff0, @@ -538,7 +532,6 @@ static struct phy_driver broadcom_drivers[] = { .read_status = bcm5482_read_status, .ack_interrupt = bcm_phy_ack_intr, .config_intr = bcm_phy_config_intr, - .driver = { .owner = THIS_MODULE }, }, { .phy_id = PHY_ID_BCM50610, .phy_id_mask = 0xfffffff0, @@ -551,7 +544,6 @@ static struct phy_driver broadcom_drivers[] = { .read_status = genphy_read_status, .ack_interrupt = bcm_phy_ack_intr, .config_intr = bcm_phy_config_intr, - .driver = { .owner = THIS_MODULE }, }, { .phy_id = PHY_ID_BCM50610M, .phy_id_mask = 0xfffffff0, @@ -564,7 +556,6 @@ static struct phy_driver broadcom_drivers[] = { .read_status = genphy_read_status, .ack_interrupt = bcm_phy_ack_intr, .config_intr = bcm_phy_config_intr, - .driver = { .owner = THIS_MODULE }, }, { .phy_id = PHY_ID_BCM57780, .phy_id_mask = 0xfffffff0, @@ -577,7 +568,6 @@ static struct phy_driver broadcom_drivers[] = { .read_status = genphy_read_status, .ack_interrupt = bcm_phy_ack_intr, .config_intr = bcm_phy_config_intr, - .driver = { .owner = THIS_MODULE }, }, { .phy_id = PHY_ID_BCMAC131, .phy_id_mask = 0xfffffff0, @@ -590,7 +580,6 @@ static struct phy_driver broadcom_drivers[] = { .read_status = genphy_read_status, .ack_interrupt = brcm_fet_ack_interrupt, .config_intr = brcm_fet_config_intr, - .driver = { .owner = THIS_MODULE }, }, { .phy_id = PHY_ID_BCM5241, .phy_id_mask = 0xfffffff0, @@ -603,7 +592,6 @@ static struct phy_driver broadcom_drivers[] = { .read_status = genphy_read_status, .ack_interrupt = brcm_fet_ack_interrupt, .config_intr = brcm_fet_config_intr, - .driver = { .owner = THIS_MODULE }, } }; module_phy_driver(broadcom_drivers); diff --git a/drivers/net/phy/cicada.c b/drivers/net/phy/cicada.c index 27f5464899d4..d339c1afea77 100644 --- a/drivers/net/phy/cicada.c +++ b/drivers/net/phy/cicada.c @@ -114,7 +114,6 @@ static struct phy_driver cis820x_driver[] = { .read_status = &genphy_read_status, .ack_interrupt = &cis820x_ack_interrupt, .config_intr = &cis820x_config_intr, - .driver = { .owner = THIS_MODULE,}, }, { .phy_id = 0x000fc440, .name = "Cicada Cis8204", @@ -126,7 +125,6 @@ static struct phy_driver cis820x_driver[] = { .read_status = &genphy_read_status, .ack_interrupt = &cis820x_ack_interrupt, .config_intr = &cis820x_config_intr, - .driver = { .owner = THIS_MODULE,}, } }; module_phy_driver(cis820x_driver); diff --git a/drivers/net/phy/davicom.c b/drivers/net/phy/davicom.c index 2a328703b4ae..36e3e2033eca 100644 --- a/drivers/net/phy/davicom.c +++ b/drivers/net/phy/davicom.c @@ -156,7 +156,6 @@ static struct phy_driver dm91xx_driver[] = { .read_status = genphy_read_status, .ack_interrupt = dm9161_ack_interrupt, .config_intr = dm9161_config_intr, - .driver = { .owner = THIS_MODULE,}, }, { .phy_id = 0x0181b8b0, .name = "Davicom DM9161B/C", @@ -168,7 +167,6 @@ static struct phy_driver dm91xx_driver[] = { .read_status = genphy_read_status, .ack_interrupt = dm9161_ack_interrupt, .config_intr = dm9161_config_intr, - .driver = { .owner = THIS_MODULE,}, }, { .phy_id = 0x0181b8a0, .name = "Davicom DM9161A", @@ -180,7 +178,6 @@ static struct phy_driver dm91xx_driver[] = { .read_status = genphy_read_status, .ack_interrupt = dm9161_ack_interrupt, 
.config_intr = dm9161_config_intr, - .driver = { .owner = THIS_MODULE,}, }, { .phy_id = 0x00181b80, .name = "Davicom DM9131", @@ -191,7 +188,6 @@ static struct phy_driver dm91xx_driver[] = { .read_status = genphy_read_status, .ack_interrupt = dm9161_ack_interrupt, .config_intr = dm9161_config_intr, - .driver = { .owner = THIS_MODULE,}, } }; module_phy_driver(dm91xx_driver); diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c index 47b711739ba9..7a240fce3a7e 100644 --- a/drivers/net/phy/dp83640.c +++ b/drivers/net/phy/dp83640.c @@ -220,9 +220,10 @@ static void rx_timestamp_work(struct work_struct *work); #define BROADCAST_ADDR 31 -static inline int broadcast_write(struct mii_bus *bus, u32 regnum, u16 val) +static inline int broadcast_write(struct phy_device *phydev, u32 regnum, + u16 val) { - return mdiobus_write(bus, BROADCAST_ADDR, regnum, val); + return mdiobus_write(phydev->mdio.bus, BROADCAST_ADDR, regnum, val); } /* Caller must hold extreg_lock. */ @@ -232,7 +233,7 @@ static int ext_read(struct phy_device *phydev, int page, u32 regnum) int val; if (dp83640->clock->page != page) { - broadcast_write(phydev->bus, PAGESEL, page); + broadcast_write(phydev, PAGESEL, page); dp83640->clock->page = page; } val = phy_read(phydev, regnum); @@ -247,11 +248,11 @@ static void ext_write(int broadcast, struct phy_device *phydev, struct dp83640_private *dp83640 = phydev->priv; if (dp83640->clock->page != page) { - broadcast_write(phydev->bus, PAGESEL, page); + broadcast_write(phydev, PAGESEL, page); dp83640->clock->page = page; } if (broadcast) - broadcast_write(phydev->bus, regnum, val); + broadcast_write(phydev, regnum, val); else phy_write(phydev, regnum, val); } @@ -845,6 +846,11 @@ static void decode_rxts(struct dp83640_private *dp83640, struct skb_shared_hwtstamps *shhwtstamps = NULL; struct sk_buff *skb; unsigned long flags; + u8 overflow; + + overflow = (phy_rxts->ns_hi >> 14) & 0x3; + if (overflow) + pr_debug("rx timestamp queue overflow, count %d\n", overflow); spin_lock_irqsave(&dp83640->rx_lock, flags); @@ -887,6 +893,7 @@ static void decode_txts(struct dp83640_private *dp83640, struct skb_shared_hwtstamps shhwtstamps; struct sk_buff *skb; u64 ns; + u8 overflow; /* We must already have the skb that triggered this. 
*/ @@ -896,6 +903,17 @@ static void decode_txts(struct dp83640_private *dp83640, pr_debug("have timestamp but tx_queue empty\n"); return; } + + overflow = (phy_txts->ns_hi >> 14) & 0x3; + if (overflow) { + pr_debug("tx timestamp queue overflow, count %d\n", overflow); + while (skb) { + skb_complete_tx_timestamp(skb, NULL); + skb = skb_dequeue(&dp83640->tx_queue); + } + return; + } + ns = phy2txts(phy_txts); memset(&shhwtstamps, 0, sizeof(shhwtstamps)); shhwtstamps.hwtstamp = ns_to_ktime(ns); @@ -1039,7 +1057,7 @@ static int choose_this_phy(struct dp83640_clock *clock, if (chosen_phy == -1 && !clock->chosen) return 1; - if (chosen_phy == phydev->addr) + if (chosen_phy == phydev->mdio.addr) return 1; return 0; @@ -1103,10 +1121,10 @@ static int dp83640_probe(struct phy_device *phydev) struct dp83640_private *dp83640; int err = -ENOMEM, i; - if (phydev->addr == BROADCAST_ADDR) + if (phydev->mdio.addr == BROADCAST_ADDR) return 0; - clock = dp83640_clock_get_bus(phydev->bus); + clock = dp83640_clock_get_bus(phydev->mdio.bus); if (!clock) goto no_clock; @@ -1132,7 +1150,8 @@ static int dp83640_probe(struct phy_device *phydev) if (choose_this_phy(clock, phydev)) { clock->chosen = dp83640; - clock->ptp_clock = ptp_clock_register(&clock->caps, &phydev->dev); + clock->ptp_clock = ptp_clock_register(&clock->caps, + &phydev->mdio.dev); if (IS_ERR(clock->ptp_clock)) { err = PTR_ERR(clock->ptp_clock); goto no_register; @@ -1158,7 +1177,7 @@ static void dp83640_remove(struct phy_device *phydev) struct list_head *this, *next; struct dp83640_private *tmp, *dp83640 = phydev->priv; - if (phydev->addr == BROADCAST_ADDR) + if (phydev->mdio.addr == BROADCAST_ADDR) return; enable_status_frames(phydev, false); @@ -1490,12 +1509,11 @@ static struct phy_driver dp83640_driver = { .hwtstamp = dp83640_hwtstamp, .rxtstamp = dp83640_rxtstamp, .txtstamp = dp83640_txtstamp, - .driver = {.owner = THIS_MODULE,} }; static int __init dp83640_init(void) { - return phy_driver_register(&dp83640_driver); + return phy_driver_register(&dp83640_driver, THIS_MODULE); } static void __exit dp83640_exit(void) diff --git a/drivers/net/phy/dp83848.c b/drivers/net/phy/dp83848.c index 5ce9bef54468..03d54c4adc88 100644 --- a/drivers/net/phy/dp83848.c +++ b/drivers/net/phy/dp83848.c @@ -1,7 +1,7 @@ /* * Driver for the Texas Instruments DP83848 PHY * - * Copyright (C) 2015 Texas Instruments Inc. 
+ * Copyright (C) 2015-2016 Texas Instruments Incorporated - http://www.ti.com/ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -16,11 +16,13 @@ #include <linux/module.h> #include <linux/phy.h> -#define DP83848_PHY_ID 0x20005c90 +#define TI_DP83848C_PHY_ID 0x20005ca0 +#define NS_DP83848C_PHY_ID 0x20005c90 +#define TLK10X_PHY_ID 0x2000a210 /* Registers */ -#define DP83848_MICR 0x11 -#define DP83848_MISR 0x12 +#define DP83848_MICR 0x11 /* MII Interrupt Control Register */ +#define DP83848_MISR 0x12 /* MII Interrupt Status Register */ /* MICR Register Fields */ #define DP83848_MICR_INT_OE BIT(0) /* Interrupt Output Enable */ @@ -36,6 +38,12 @@ #define DP83848_MISR_ED_INT_EN BIT(6) /* Energy detect */ #define DP83848_MISR_LQM_INT_EN BIT(7) /* Link Quality Monitor */ +#define DP83848_INT_EN_MASK \ + (DP83848_MISR_ANC_INT_EN | \ + DP83848_MISR_DUP_INT_EN | \ + DP83848_MISR_SPD_INT_EN | \ + DP83848_MISR_LINK_INT_EN) + static int dp83848_ack_interrupt(struct phy_device *phydev) { int err = phy_read(phydev, DP83848_MISR); @@ -45,52 +53,58 @@ static int dp83848_ack_interrupt(struct phy_device *phydev) static int dp83848_config_intr(struct phy_device *phydev) { - int err; + int control, ret; + + control = phy_read(phydev, DP83848_MICR); + if (control < 0) + return control; if (phydev->interrupts == PHY_INTERRUPT_ENABLED) { - err = phy_write(phydev, DP83848_MICR, - DP83848_MICR_INT_OE | - DP83848_MICR_INTEN); - if (err < 0) - return err; - - return phy_write(phydev, DP83848_MISR, - DP83848_MISR_ANC_INT_EN | - DP83848_MISR_DUP_INT_EN | - DP83848_MISR_SPD_INT_EN | - DP83848_MISR_LINK_INT_EN); + control |= DP83848_MICR_INT_OE; + control |= DP83848_MICR_INTEN; + + ret = phy_write(phydev, DP83848_MISR, DP83848_INT_EN_MASK); + if (ret < 0) + return ret; + } else { + control &= ~DP83848_MICR_INTEN; } - return phy_write(phydev, DP83848_MICR, 0x0); + return phy_write(phydev, DP83848_MICR, control); } static struct mdio_device_id __maybe_unused dp83848_tbl[] = { - { DP83848_PHY_ID, 0xfffffff0 }, + { TI_DP83848C_PHY_ID, 0xfffffff0 }, + { NS_DP83848C_PHY_ID, 0xfffffff0 }, + { TLK10X_PHY_ID, 0xfffffff0 }, { } }; MODULE_DEVICE_TABLE(mdio, dp83848_tbl); +#define DP83848_PHY_DRIVER(_id, _name) \ + { \ + .phy_id = _id, \ + .phy_id_mask = 0xfffffff0, \ + .name = _name, \ + .features = PHY_BASIC_FEATURES, \ + .flags = PHY_HAS_INTERRUPT, \ + \ + .soft_reset = genphy_soft_reset, \ + .config_init = genphy_config_init, \ + .suspend = genphy_suspend, \ + .resume = genphy_resume, \ + .config_aneg = genphy_config_aneg, \ + .read_status = genphy_read_status, \ + \ + /* IRQ related */ \ + .ack_interrupt = dp83848_ack_interrupt, \ + .config_intr = dp83848_config_intr, \ + } + static struct phy_driver dp83848_driver[] = { - { - .phy_id = DP83848_PHY_ID, - .phy_id_mask = 0xfffffff0, - .name = "TI DP83848", - .features = PHY_BASIC_FEATURES, - .flags = PHY_HAS_INTERRUPT, - - .soft_reset = genphy_soft_reset, - .config_init = genphy_config_init, - .suspend = genphy_suspend, - .resume = genphy_resume, - .config_aneg = genphy_config_aneg, - .read_status = genphy_read_status, - - /* IRQ related */ - .ack_interrupt = dp83848_ack_interrupt, - .config_intr = dp83848_config_intr, - - .driver = { .owner = THIS_MODULE, }, - }, + DP83848_PHY_DRIVER(TI_DP83848C_PHY_ID, "TI DP83848C 10/100 Mbps PHY"), + DP83848_PHY_DRIVER(NS_DP83848C_PHY_ID, "NS DP83848C 10/100 Mbps PHY"), + DP83848_PHY_DRIVER(TLK10X_PHY_ID, "TI TLK10X 10/100 Mbps PHY"), }; 
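For reference, a single entry of the consolidated table expands to roughly the following once DP83848_PHY_DRIVER() is applied — no new code, just the macro above spelled out:

/* Sketch: expansion of
 * DP83848_PHY_DRIVER(TI_DP83848C_PHY_ID, "TI DP83848C 10/100 Mbps PHY")
 */
{
	.phy_id		= TI_DP83848C_PHY_ID,	/* 0x20005ca0 */
	.phy_id_mask	= 0xfffffff0,
	.name		= "TI DP83848C 10/100 Mbps PHY",
	.features	= PHY_BASIC_FEATURES,
	.flags		= PHY_HAS_INTERRUPT,

	.soft_reset	= genphy_soft_reset,
	.config_init	= genphy_config_init,
	.suspend	= genphy_suspend,
	.resume		= genphy_resume,
	.config_aneg	= genphy_config_aneg,
	.read_status	= genphy_read_status,

	/* IRQ related */
	.ack_interrupt	= dp83848_ack_interrupt,
	.config_intr	= dp83848_config_intr,
},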
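The .driver = { .owner = THIS_MODULE } deletions recurring through these files depend on ownership being supplied at registration time instead, as the dp83640 hunk shows for phy_driver_register(). For a driver array the equivalent would presumably look like the sketch below; the three-argument phy_drivers_register() is inferred from the phy_driver_register() change (its prototype is not part of this diff), and example_drivers is a placeholder:

static int __init example_phy_init(void)
{
	/* example_drivers stands in for an array like dp83848_driver below;
	 * the owner module now travels with the registration call.
	 */
	return phy_drivers_register(example_drivers,
				    ARRAY_SIZE(example_drivers), THIS_MODULE);
}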
module_phy_driver(dp83848_driver); diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c index 4ebf601073d9..2afa61b51d41 100644 --- a/drivers/net/phy/dp83867.c +++ b/drivers/net/phy/dp83867.c @@ -103,7 +103,7 @@ static int dp83867_config_intr(struct phy_device *phydev) static int dp83867_of_init(struct phy_device *phydev) { struct dp83867_private *dp83867 = phydev->priv; - struct device *dev = &phydev->dev; + struct device *dev = &phydev->mdio.dev; struct device_node *of_node = dev->of_node; int ret; @@ -137,7 +137,7 @@ static int dp83867_config_init(struct phy_device *phydev) u16 val, delay; if (!phydev->priv) { - dp83867 = devm_kzalloc(&phydev->dev, sizeof(*dp83867), + dp83867 = devm_kzalloc(&phydev->mdio.dev, sizeof(*dp83867), GFP_KERNEL); if (!dp83867) return -ENOMEM; @@ -160,7 +160,7 @@ static int dp83867_config_init(struct phy_device *phydev) if ((phydev->interface >= PHY_INTERFACE_MODE_RGMII_ID) && (phydev->interface <= PHY_INTERFACE_MODE_RGMII_RXID)) { val = phy_read_mmd_indirect(phydev, DP83867_RGMIICTL, - DP83867_DEVADDR, phydev->addr); + DP83867_DEVADDR); if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) val |= (DP83867_RGMII_TX_CLK_DELAY_EN | DP83867_RGMII_RX_CLK_DELAY_EN); @@ -172,13 +172,13 @@ static int dp83867_config_init(struct phy_device *phydev) val |= DP83867_RGMII_RX_CLK_DELAY_EN; phy_write_mmd_indirect(phydev, DP83867_RGMIICTL, - DP83867_DEVADDR, phydev->addr, val); + DP83867_DEVADDR, val); delay = (dp83867->rx_id_delay | (dp83867->tx_id_delay << DP83867_RGMII_TX_CLK_DELAY_SHIFT)); phy_write_mmd_indirect(phydev, DP83867_RGMIIDCTL, - DP83867_DEVADDR, phydev->addr, delay); + DP83867_DEVADDR, delay); } return 0; @@ -214,8 +214,6 @@ static struct phy_driver dp83867_driver[] = { .read_status = genphy_read_status, .suspend = genphy_suspend, .resume = genphy_resume, - - .driver = {.owner = THIS_MODULE,} }, }; module_phy_driver(dp83867_driver); diff --git a/drivers/net/phy/et1011c.c b/drivers/net/phy/et1011c.c index a907743816a8..a9a4edfa23c8 100644 --- a/drivers/net/phy/et1011c.c +++ b/drivers/net/phy/et1011c.c @@ -95,7 +95,6 @@ static struct phy_driver et1011c_driver[] = { { .flags = PHY_POLL, .config_aneg = et1011c_config_aneg, .read_status = et1011c_read_status, - .driver = { .owner = THIS_MODULE,}, } }; module_phy_driver(et1011c_driver); diff --git a/drivers/net/phy/fixed_phy.c b/drivers/net/phy/fixed_phy.c index e23bf5b90e17..ab9c473d75ea 100644 --- a/drivers/net/phy/fixed_phy.c +++ b/drivers/net/phy/fixed_phy.c @@ -27,7 +27,6 @@ #define MII_REGS_NUM 29 struct fixed_mdio_bus { - int irqs[PHY_MAX_ADDR]; struct mii_bus *mii_bus; struct list_head phys; }; @@ -198,11 +197,11 @@ int fixed_phy_set_link_update(struct phy_device *phydev, struct fixed_mdio_bus *fmb = &platform_fmb; struct fixed_phy *fp; - if (!phydev || !phydev->bus) + if (!phydev || !phydev->mdio.bus) return -EINVAL; list_for_each_entry(fp, &fmb->phys, node) { - if (fp->addr == phydev->addr) { + if (fp->addr == phydev->mdio.addr) { fp->link_update = link_update; fp->phydev = phydev; return 0; @@ -220,11 +219,11 @@ int fixed_phy_update_state(struct phy_device *phydev, struct fixed_mdio_bus *fmb = &platform_fmb; struct fixed_phy *fp; - if (!phydev || phydev->bus != fmb->mii_bus) + if (!phydev || phydev->mdio.bus != fmb->mii_bus) return -EINVAL; list_for_each_entry(fp, &fmb->phys, node) { - if (fp->addr == phydev->addr) { + if (fp->addr == phydev->mdio.addr) { #define _UPD(x) if (changed->x) \ fp->status.x = status->x _UPD(link); @@ -256,7 +255,7 @@ int fixed_phy_add(unsigned int irq, int 
phy_addr, memset(fp->regs, 0xFF, sizeof(fp->regs[0]) * MII_REGS_NUM); - fmb->irqs[phy_addr] = irq; + fmb->mii_bus->irq[phy_addr] = irq; fp->addr = phy_addr; fp->status = *status; @@ -345,7 +344,7 @@ struct phy_device *fixed_phy_register(unsigned int irq, } of_node_get(np); - phy->dev.of_node = np; + phy->mdio.dev.of_node = np; phy->is_pseudo_fixed_link = true; switch (status->speed) { @@ -395,7 +394,6 @@ static int __init fixed_mdio_bus_init(void) fmb->mii_bus->parent = &pdev->dev; fmb->mii_bus->read = &fixed_mdio_read; fmb->mii_bus->write = &fixed_mdio_write; - fmb->mii_bus->irq = fmb->irqs; ret = mdiobus_register(fmb->mii_bus); if (ret) diff --git a/drivers/net/phy/icplus.c b/drivers/net/phy/icplus.c index 0dbc445a5fa0..e5f251b91578 100644 --- a/drivers/net/phy/icplus.c +++ b/drivers/net/phy/icplus.c @@ -53,43 +53,43 @@ static int ip175c_config_init(struct phy_device *phydev) if (full_reset_performed == 0) { /* master reset */ - err = mdiobus_write(phydev->bus, 30, 0, 0x175c); + err = mdiobus_write(phydev->mdio.bus, 30, 0, 0x175c); if (err < 0) return err; /* ensure no bus delays overlap reset period */ - err = mdiobus_read(phydev->bus, 30, 0); + err = mdiobus_read(phydev->mdio.bus, 30, 0); /* data sheet specifies reset period is 2 msec */ mdelay(2); /* enable IP175C mode */ - err = mdiobus_write(phydev->bus, 29, 31, 0x175c); + err = mdiobus_write(phydev->mdio.bus, 29, 31, 0x175c); if (err < 0) return err; /* Set MII0 speed and duplex (in PHY mode) */ - err = mdiobus_write(phydev->bus, 29, 22, 0x420); + err = mdiobus_write(phydev->mdio.bus, 29, 22, 0x420); if (err < 0) return err; /* reset switch ports */ for (i = 0; i < 5; i++) { - err = mdiobus_write(phydev->bus, i, + err = mdiobus_write(phydev->mdio.bus, i, MII_BMCR, BMCR_RESET); if (err < 0) return err; } for (i = 0; i < 5; i++) - err = mdiobus_read(phydev->bus, i, MII_BMCR); + err = mdiobus_read(phydev->mdio.bus, i, MII_BMCR); mdelay(2); full_reset_performed = 1; } - if (phydev->addr != 4) { + if (phydev->mdio.addr != 4) { phydev->state = PHY_RUNNING; phydev->speed = SPEED_100; phydev->duplex = DUPLEX_FULL; @@ -184,7 +184,7 @@ static int ip101a_g_config_init(struct phy_device *phydev) static int ip175c_read_status(struct phy_device *phydev) { - if (phydev->addr == 4) /* WAN port */ + if (phydev->mdio.addr == 4) /* WAN port */ genphy_read_status(phydev); else /* Don't need to read status for switch ports */ @@ -195,7 +195,7 @@ static int ip175c_read_status(struct phy_device *phydev) static int ip175c_config_aneg(struct phy_device *phydev) { - if (phydev->addr == 4) /* WAN port */ + if (phydev->mdio.addr == 4) /* WAN port */ genphy_config_aneg(phydev); return 0; @@ -221,7 +221,6 @@ static struct phy_driver icplus_driver[] = { .read_status = &ip175c_read_status, .suspend = genphy_suspend, .resume = genphy_resume, - .driver = { .owner = THIS_MODULE,}, }, { .phy_id = 0x02430d90, .name = "ICPlus IP1001", @@ -233,7 +232,6 @@ static struct phy_driver icplus_driver[] = { .read_status = &genphy_read_status, .suspend = genphy_suspend, .resume = genphy_resume, - .driver = { .owner = THIS_MODULE,}, }, { .phy_id = 0x02430c54, .name = "ICPlus IP101A/G", @@ -247,7 +245,6 @@ static struct phy_driver icplus_driver[] = { .read_status = &genphy_read_status, .suspend = genphy_suspend, .resume = genphy_resume, - .driver = { .owner = THIS_MODULE,}, } }; module_phy_driver(icplus_driver); diff --git a/drivers/net/phy/lxt.c b/drivers/net/phy/lxt.c index a3a5a703635b..f6078376ef50 100644 --- a/drivers/net/phy/lxt.c +++ b/drivers/net/phy/lxt.c @@ -278,7 +278,6 
@@ static struct phy_driver lxt97x_driver[] = { .read_status = genphy_read_status, .ack_interrupt = lxt970_ack_interrupt, .config_intr = lxt970_config_intr, - .driver = { .owner = THIS_MODULE,}, }, { .phy_id = 0x001378e0, .name = "LXT971", @@ -289,7 +288,6 @@ static struct phy_driver lxt97x_driver[] = { .read_status = genphy_read_status, .ack_interrupt = lxt971_ack_interrupt, .config_intr = lxt971_config_intr, - .driver = { .owner = THIS_MODULE,}, }, { .phy_id = 0x00137a10, .name = "LXT973-A2", @@ -299,7 +297,6 @@ static struct phy_driver lxt97x_driver[] = { .probe = lxt973_probe, .config_aneg = lxt973_config_aneg, .read_status = lxt973a2_read_status, - .driver = { .owner = THIS_MODULE,}, }, { .phy_id = 0x00137a10, .name = "LXT973", @@ -309,7 +306,6 @@ static struct phy_driver lxt97x_driver[] = { .probe = lxt973_probe, .config_aneg = lxt973_config_aneg, .read_status = genphy_read_status, - .driver = { .owner = THIS_MODULE,}, } }; module_phy_driver(lxt97x_driver); diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index 50b5eac75854..1dcbd3ff9e38 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -133,6 +133,11 @@ #define MII_88E3016_DISABLE_SCRAMBLER 0x0200 #define MII_88E3016_AUTO_MDIX_CROSSOVER 0x0030 +#define MII_88E1510_GEN_CTRL_REG_1 0x14 +#define MII_88E1510_GEN_CTRL_REG_1_MODE_MASK 0x7 +#define MII_88E1510_GEN_CTRL_REG_1_MODE_SGMII 0x1 /* SGMII to copper */ +#define MII_88E1510_GEN_CTRL_REG_1_RESET 0x8000 /* Soft reset */ + MODULE_DESCRIPTION("Marvell PHY driver"); MODULE_AUTHOR("Andy Fleming"); MODULE_LICENSE("GPL"); @@ -300,10 +305,11 @@ static int marvell_of_reg_init(struct phy_device *phydev) const __be32 *paddr; int len, i, saved_page, current_page, page_changed, ret; - if (!phydev->dev.of_node) + if (!phydev->mdio.dev.of_node) return 0; - paddr = of_get_property(phydev->dev.of_node, "marvell,reg-init", &len); + paddr = of_get_property(phydev->mdio.dev.of_node, + "marvell,reg-init", &len); if (!paddr || len < (4 * sizeof(*paddr))) return 0; @@ -437,6 +443,41 @@ static int m88e1318_config_aneg(struct phy_device *phydev) return m88e1121_config_aneg(phydev); } +static int m88e1510_config_init(struct phy_device *phydev) +{ + int err; + int temp; + + /* SGMII-to-Copper mode initialization */ + if (phydev->interface == PHY_INTERFACE_MODE_SGMII) { + /* Select page 18 */ + err = phy_write(phydev, MII_MARVELL_PHY_PAGE, 18); + if (err < 0) + return err; + + /* In reg 20, write MODE[2:0] = 0x1 (SGMII to Copper) */ + temp = phy_read(phydev, MII_88E1510_GEN_CTRL_REG_1); + temp &= ~MII_88E1510_GEN_CTRL_REG_1_MODE_MASK; + temp |= MII_88E1510_GEN_CTRL_REG_1_MODE_SGMII; + err = phy_write(phydev, MII_88E1510_GEN_CTRL_REG_1, temp); + if (err < 0) + return err; + + /* PHY reset is necessary after changing MODE[2:0] */ + temp |= MII_88E1510_GEN_CTRL_REG_1_RESET; + err = phy_write(phydev, MII_88E1510_GEN_CTRL_REG_1, temp); + if (err < 0) + return err; + + /* Reset page selection */ + err = phy_write(phydev, MII_MARVELL_PHY_PAGE, 0); + if (err < 0) + return err; + } + + return 0; +} + static int m88e1510_config_aneg(struct phy_device *phydev) { int err; @@ -1060,7 +1101,7 @@ static int marvell_probe(struct phy_device *phydev) { struct marvell_priv *priv; - priv = devm_kzalloc(&phydev->dev, sizeof(*priv), GFP_KERNEL); + priv = devm_kzalloc(&phydev->mdio.dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; @@ -1086,7 +1127,6 @@ static struct phy_driver marvell_drivers[] = { .get_sset_count = marvell_get_sset_count, .get_strings = marvell_get_strings, 
.get_stats = marvell_get_stats, - .driver = { .owner = THIS_MODULE }, }, { .phy_id = MARVELL_PHY_ID_88E1112, @@ -1105,7 +1145,6 @@ static struct phy_driver marvell_drivers[] = { .get_sset_count = marvell_get_sset_count, .get_strings = marvell_get_strings, .get_stats = marvell_get_stats, - .driver = { .owner = THIS_MODULE }, }, { .phy_id = MARVELL_PHY_ID_88E1111, @@ -1124,7 +1163,6 @@ static struct phy_driver marvell_drivers[] = { .get_sset_count = marvell_get_sset_count, .get_strings = marvell_get_strings, .get_stats = marvell_get_stats, - .driver = { .owner = THIS_MODULE }, }, { .phy_id = MARVELL_PHY_ID_88E1118, @@ -1143,7 +1181,6 @@ static struct phy_driver marvell_drivers[] = { .get_sset_count = marvell_get_sset_count, .get_strings = marvell_get_strings, .get_stats = marvell_get_stats, - .driver = {.owner = THIS_MODULE,}, }, { .phy_id = MARVELL_PHY_ID_88E1121R, @@ -1162,7 +1199,6 @@ static struct phy_driver marvell_drivers[] = { .get_sset_count = marvell_get_sset_count, .get_strings = marvell_get_strings, .get_stats = marvell_get_stats, - .driver = { .owner = THIS_MODULE }, }, { .phy_id = MARVELL_PHY_ID_88E1318S, @@ -1183,7 +1219,6 @@ static struct phy_driver marvell_drivers[] = { .get_sset_count = marvell_get_sset_count, .get_strings = marvell_get_strings, .get_stats = marvell_get_stats, - .driver = { .owner = THIS_MODULE }, }, { .phy_id = MARVELL_PHY_ID_88E1145, @@ -1202,7 +1237,6 @@ static struct phy_driver marvell_drivers[] = { .get_sset_count = marvell_get_sset_count, .get_strings = marvell_get_strings, .get_stats = marvell_get_stats, - .driver = { .owner = THIS_MODULE }, }, { .phy_id = MARVELL_PHY_ID_88E1149R, @@ -1221,7 +1255,6 @@ static struct phy_driver marvell_drivers[] = { .get_sset_count = marvell_get_sset_count, .get_strings = marvell_get_strings, .get_stats = marvell_get_stats, - .driver = { .owner = THIS_MODULE }, }, { .phy_id = MARVELL_PHY_ID_88E1240, @@ -1240,7 +1273,6 @@ static struct phy_driver marvell_drivers[] = { .get_sset_count = marvell_get_sset_count, .get_strings = marvell_get_strings, .get_stats = marvell_get_stats, - .driver = { .owner = THIS_MODULE }, }, { .phy_id = MARVELL_PHY_ID_88E1116R, @@ -1259,7 +1291,6 @@ static struct phy_driver marvell_drivers[] = { .get_sset_count = marvell_get_sset_count, .get_strings = marvell_get_strings, .get_stats = marvell_get_stats, - .driver = { .owner = THIS_MODULE }, }, { .phy_id = MARVELL_PHY_ID_88E1510, @@ -1268,6 +1299,7 @@ static struct phy_driver marvell_drivers[] = { .features = PHY_GBIT_FEATURES, .flags = PHY_HAS_INTERRUPT, .probe = marvell_probe, + .config_init = &m88e1510_config_init, .config_aneg = &m88e1510_config_aneg, .read_status = &marvell_read_status, .ack_interrupt = &marvell_ack_interrupt, @@ -1278,7 +1310,6 @@ static struct phy_driver marvell_drivers[] = { .get_sset_count = marvell_get_sset_count, .get_strings = marvell_get_strings, .get_stats = marvell_get_stats, - .driver = { .owner = THIS_MODULE }, }, { .phy_id = MARVELL_PHY_ID_88E1540, @@ -1297,7 +1328,6 @@ static struct phy_driver marvell_drivers[] = { .get_sset_count = marvell_get_sset_count, .get_strings = marvell_get_strings, .get_stats = marvell_get_stats, - .driver = { .owner = THIS_MODULE }, }, { .phy_id = MARVELL_PHY_ID_88E3016, @@ -1318,7 +1348,6 @@ static struct phy_driver marvell_drivers[] = { .get_sset_count = marvell_get_sset_count, .get_strings = marvell_get_strings, .get_stats = marvell_get_stats, - .driver = { .owner = THIS_MODULE }, }, }; diff --git a/drivers/net/phy/mdio-bcm-unimac.c b/drivers/net/phy/mdio-bcm-unimac.c index 
4bde5e728fe0..8c73b2e771dd 100644 --- a/drivers/net/phy/mdio-bcm-unimac.c +++ b/drivers/net/phy/mdio-bcm-unimac.c @@ -200,16 +200,10 @@ static int unimac_mdio_probe(struct platform_device *pdev) bus->reset = unimac_mdio_reset; snprintf(bus->id, MII_BUS_ID_SIZE, "%s", pdev->name); - bus->irq = kcalloc(PHY_MAX_ADDR, sizeof(int), GFP_KERNEL); - if (!bus->irq) { - ret = -ENOMEM; - goto out_mdio_free; - } - ret = of_mdiobus_register(bus, np); if (ret) { dev_err(&pdev->dev, "MDIO bus registration failed\n"); - goto out_mdio_irq; + goto out_mdio_free; } platform_set_drvdata(pdev, priv); @@ -218,8 +212,6 @@ static int unimac_mdio_probe(struct platform_device *pdev) return 0; -out_mdio_irq: - kfree(bus->irq); out_mdio_free: mdiobus_free(bus); return ret; @@ -230,7 +222,6 @@ static int unimac_mdio_remove(struct platform_device *pdev) struct unimac_mdio_priv *priv = platform_get_drvdata(pdev); mdiobus_unregister(priv->mii_bus); - kfree(priv->mii_bus->irq); mdiobus_free(priv->mii_bus); return 0; diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c index 95f51d7267b3..27ab63064f95 100644 --- a/drivers/net/phy/mdio-gpio.c +++ b/drivers/net/phy/mdio-gpio.c @@ -159,7 +159,7 @@ static struct mii_bus *mdio_gpio_bus_init(struct device *dev, new_bus->phy_mask = pdata->phy_mask; new_bus->phy_ignore_ta_mask = pdata->phy_ignore_ta_mask; - new_bus->irq = pdata->irqs; + memcpy(new_bus->irq, pdata->irqs, sizeof(new_bus->irq)); new_bus->parent = dev; if (new_bus->phy_mask == ~0) diff --git a/drivers/net/phy/mdio-moxart.c b/drivers/net/phy/mdio-moxart.c index f1fc51f655d9..5bb56d126693 100644 --- a/drivers/net/phy/mdio-moxart.c +++ b/drivers/net/phy/mdio-moxart.c @@ -130,13 +130,6 @@ static int moxart_mdio_probe(struct platform_device *pdev) snprintf(bus->id, MII_BUS_ID_SIZE, "%s-%d-mii", pdev->name, pdev->id); bus->parent = &pdev->dev; - bus->irq = devm_kzalloc(&pdev->dev, sizeof(int) * PHY_MAX_ADDR, - GFP_KERNEL); - if (!bus->irq) { - ret = -ENOMEM; - goto err_out_free_mdiobus; - } - /* Setting PHY_IGNORE_INTERRUPT here even if it has no effect, * of_mdiobus_register() sets these PHY_POLL. 
* Ideally, the interrupt from MAC controller could be used to diff --git a/drivers/net/phy/mdio-mux.c b/drivers/net/phy/mdio-mux.c index 7f8e7662e28c..308ade0eb1b6 100644 --- a/drivers/net/phy/mdio-mux.c +++ b/drivers/net/phy/mdio-mux.c @@ -34,7 +34,6 @@ struct mdio_mux_child_bus { struct mdio_mux_parent_bus *parent; struct mdio_mux_child_bus *next; int bus_number; - int phy_irq[PHY_MAX_ADDR]; }; /* @@ -157,7 +156,7 @@ int mdio_mux_init(struct device *dev, break; } cb->mii_bus->priv = cb; - cb->mii_bus->irq = cb->phy_irq; + cb->mii_bus->name = "mdio_mux"; snprintf(cb->mii_bus->id, MII_BUS_ID_SIZE, "%x.%x", pb->parent_id, v); diff --git a/drivers/net/phy/mdio-octeon.c b/drivers/net/phy/mdio-octeon.c index fcf4e4df7cc8..47d4f2f263d1 100644 --- a/drivers/net/phy/mdio-octeon.c +++ b/drivers/net/phy/mdio-octeon.c @@ -113,7 +113,6 @@ struct octeon_mdiobus { resource_size_t mdio_phys; resource_size_t regsize; enum octeon_mdiobus_mode mode; - int phy_irq[PHY_MAX_ADDR]; }; #ifdef CONFIG_CAVIUM_OCTEON_SOC @@ -268,12 +267,13 @@ static int octeon_mdiobus_write(struct mii_bus *bus, int phy_id, static int octeon_mdiobus_probe(struct platform_device *pdev) { struct octeon_mdiobus *bus; + struct mii_bus *mii_bus; struct resource *res_mem; union cvmx_smix_en smi_en; int err = -ENOENT; - bus = devm_kzalloc(&pdev->dev, sizeof(*bus), GFP_KERNEL); - if (!bus) + mii_bus = devm_mdiobus_alloc_size(&pdev->dev, sizeof(*bus)); + if (!mii_bus) return -ENOMEM; res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); @@ -282,6 +282,8 @@ static int octeon_mdiobus_probe(struct platform_device *pdev) return -ENXIO; } + bus = mii_bus->priv; + bus->mii_bus = mii_bus; bus->mdio_phys = res_mem->start; bus->regsize = resource_size(res_mem); @@ -298,16 +300,11 @@ static int octeon_mdiobus_probe(struct platform_device *pdev) return -ENOMEM; } - bus->mii_bus = mdiobus_alloc(); - if (!bus->mii_bus) - goto fail; - smi_en.u64 = 0; smi_en.s.en = 1; oct_mdio_writeq(smi_en.u64, bus->register_base + SMI_EN); bus->mii_bus->priv = bus; - bus->mii_bus->irq = bus->phy_irq; bus->mii_bus->name = "mdio-octeon"; snprintf(bus->mii_bus->id, MII_BUS_ID_SIZE, "%llx", bus->register_base); bus->mii_bus->parent = &pdev->dev; @@ -326,7 +323,6 @@ static int octeon_mdiobus_probe(struct platform_device *pdev) return 0; fail_register: mdiobus_free(bus->mii_bus); -fail: smi_en.u64 = 0; oct_mdio_writeq(smi_en.u64, bus->register_base + SMI_EN); return err; diff --git a/drivers/net/phy/mdio-sun4i.c b/drivers/net/phy/mdio-sun4i.c index 15bc7f9ea224..f70522c35163 100644 --- a/drivers/net/phy/mdio-sun4i.c +++ b/drivers/net/phy/mdio-sun4i.c @@ -96,7 +96,7 @@ static int sun4i_mdio_probe(struct platform_device *pdev) struct mii_bus *bus; struct sun4i_mdio_data *data; struct resource *res; - int ret, i; + int ret; bus = mdiobus_alloc_size(sizeof(*data)); if (!bus) @@ -108,16 +108,6 @@ static int sun4i_mdio_probe(struct platform_device *pdev) snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii", dev_name(&pdev->dev)); bus->parent = &pdev->dev; - bus->irq = devm_kzalloc(&pdev->dev, sizeof(int) * PHY_MAX_ADDR, - GFP_KERNEL); - if (!bus->irq) { - ret = -ENOMEM; - goto err_out_free_mdiobus; - } - - for (i = 0; i < PHY_MAX_ADDR; i++) - bus->irq[i] = PHY_POLL; - data = bus->priv; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); data->membase = devm_ioremap_resource(&pdev->dev, res); diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c index 88cb4592b6fb..0cba64f1ecf4 100644 --- a/drivers/net/phy/mdio_bus.c +++ b/drivers/net/phy/mdio_bus.c @@ -38,6 +38,48 @@ 
#include <asm/irq.h> +int mdiobus_register_device(struct mdio_device *mdiodev) +{ + if (mdiodev->bus->mdio_map[mdiodev->addr]) + return -EBUSY; + + mdiodev->bus->mdio_map[mdiodev->addr] = mdiodev; + + return 0; +} +EXPORT_SYMBOL(mdiobus_register_device); + +int mdiobus_unregister_device(struct mdio_device *mdiodev) +{ + if (mdiodev->bus->mdio_map[mdiodev->addr] != mdiodev) + return -EINVAL; + + mdiodev->bus->mdio_map[mdiodev->addr] = NULL; + + return 0; +} +EXPORT_SYMBOL(mdiobus_unregister_device); + +struct phy_device *mdiobus_get_phy(struct mii_bus *bus, int addr) +{ + struct mdio_device *mdiodev = bus->mdio_map[addr]; + + if (!mdiodev) + return NULL; + + if (!(mdiodev->flags & MDIO_DEVICE_FLAG_PHY)) + return NULL; + + return container_of(mdiodev, struct phy_device, mdio); +} +EXPORT_SYMBOL(mdiobus_get_phy); + +bool mdiobus_is_registered_device(struct mii_bus *bus, int addr) +{ + return bus->mdio_map[addr]; +} +EXPORT_SYMBOL(mdiobus_is_registered_device); + /** * mdiobus_alloc_size - allocate a mii_bus structure * @size: extra amount of memory to allocate for private storage. @@ -51,6 +93,7 @@ struct mii_bus *mdiobus_alloc_size(size_t size) struct mii_bus *bus; size_t aligned_size = ALIGN(sizeof(*bus), NETDEV_ALIGN); size_t alloc_size; + int i; /* If we alloc extra space, it should be aligned */ if (size) @@ -59,11 +102,16 @@ struct mii_bus *mdiobus_alloc_size(size_t size) alloc_size = sizeof(*bus); bus = kzalloc(alloc_size, GFP_KERNEL); - if (bus) { - bus->state = MDIOBUS_ALLOCATED; - if (size) - bus->priv = (void *)bus + aligned_size; - } + if (!bus) + return NULL; + + bus->state = MDIOBUS_ALLOCATED; + if (size) + bus->priv = (void *)bus + aligned_size; + + /* Initialise the interrupts to polling */ + for (i = 0; i < PHY_MAX_ADDR; i++) + bus->irq[i] = PHY_POLL; return bus; } @@ -190,47 +238,48 @@ struct mii_bus *of_mdio_find_bus(struct device_node *mdio_bus_np) } EXPORT_SYMBOL(of_mdio_find_bus); -/* Walk the list of subnodes of a mdio bus and look for a node that matches the - * phy's address with its 'reg' property. If found, set the of_node pointer for - * the phy. This allows auto-probed pyh devices to be supplied with information - * passed in via DT. +/* Walk the list of subnodes of a mdio bus and look for a node that + * matches the mdio device's address with its 'reg' property. If + * found, set the of_node pointer for the mdio device. This allows + * auto-probed phy devices to be supplied with information passed in + * via DT. 
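The bus-driver hunks earlier in this section (unimac, moxart, sun4i) can drop their private irq tables because mdiobus_alloc_size() now embeds one in struct mii_bus and presets every slot to PHY_POLL, as shown just above. A minimal sketch of what remains in a bus probe, under the assumption of a platform device; example_mdio_read/write are illustrative names, and error unwinding is elided:

static int example_mdio_probe(struct platform_device *pdev)
{
	struct mii_bus *bus;

	bus = mdiobus_alloc();		/* irq[] comes back preset to PHY_POLL */
	if (!bus)
		return -ENOMEM;

	bus->name = "example-mdio";	/* illustrative values */
	bus->read = example_mdio_read;
	bus->write = example_mdio_write;
	snprintf(bus->id, MII_BUS_ID_SIZE, "%s", pdev->name);
	bus->parent = &pdev->dev;
	bus->irq[3] = platform_get_irq(pdev, 0);  /* only addr 3 has an IRQ wired */

	return mdiobus_register(bus);
}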
*/ -static void of_mdiobus_link_phydev(struct mii_bus *mdio, - struct phy_device *phydev) +static void of_mdiobus_link_mdiodev(struct mii_bus *bus, + struct mdio_device *mdiodev) { - struct device *dev = &phydev->dev; + struct device *dev = &mdiodev->dev; struct device_node *child; - if (dev->of_node || !mdio->dev.of_node) + if (dev->of_node || !bus->dev.of_node) return; - for_each_available_child_of_node(mdio->dev.of_node, child) { + for_each_available_child_of_node(bus->dev.of_node, child) { int addr; int ret; ret = of_property_read_u32(child, "reg", &addr); if (ret < 0) { - dev_err(dev, "%s has invalid PHY address\n", + dev_err(dev, "%s has invalid MDIO address\n", child->full_name); continue; } - /* A PHY must have a reg property in the range [0-31] */ + /* An MDIO device must have a reg property in the range [0-31] */ if (addr >= PHY_MAX_ADDR) { - dev_err(dev, "%s PHY address %i is too large\n", + dev_err(dev, "%s MDIO address %i is too large\n", child->full_name, addr); continue; } - if (addr == phydev->addr) { + if (addr == mdiodev->addr) { dev->of_node = child; return; } } } #else /* !IS_ENABLED(CONFIG_OF_MDIO) */ -static inline void of_mdiobus_link_phydev(struct mii_bus *mdio, - struct phy_device *phydev) +static inline void of_mdiobus_link_mdiodev(struct mii_bus *mdio, + struct mdio_device *mdiodev) { } #endif @@ -243,12 +292,15 @@ static inline void of_mdiobus_link_phydev(struct mii_bus *mdio, * Description: Called by a bus driver to bring up all the PHYs * on a given bus, and attach them to the bus. Drivers should use * mdiobus_register() rather than __mdiobus_register() unless they - * need to pass a specific owner module. + * need to pass a specific owner module. MDIO devices which are not + * PHYs will not be brought up by this function. They are expected + * to be explicitly listed in DT and instantiated by of_mdiobus_register(). * * Returns 0 on success or < 0 on error. */ int __mdiobus_register(struct mii_bus *bus, struct module *owner) { + struct mdio_device *mdiodev; int i, err; if (NULL == bus || NULL == bus->name || @@ -294,11 +346,12 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner) error: while (--i >= 0) { - struct phy_device *phydev = bus->phy_map[i]; - if (phydev) { - phy_device_remove(phydev); - phy_device_free(phydev); - } + mdiodev = bus->mdio_map[i]; + if (!mdiodev) + continue; + + mdiodev->device_remove(mdiodev); + mdiodev->device_free(mdiodev); } device_del(&bus->dev); return err; @@ -307,17 +360,19 @@ EXPORT_SYMBOL(__mdiobus_register); void mdiobus_unregister(struct mii_bus *bus) { + struct mdio_device *mdiodev; int i; BUG_ON(bus->state != MDIOBUS_REGISTERED); bus->state = MDIOBUS_UNREGISTERED; for (i = 0; i < PHY_MAX_ADDR; i++) { - struct phy_device *phydev = bus->phy_map[i]; - if (phydev) { - phy_device_remove(phydev); - phy_device_free(phydev); - } + mdiodev = bus->mdio_map[i]; + if (!mdiodev) + continue; + + mdiodev->device_remove(mdiodev); + mdiodev->device_free(mdiodev); } device_del(&bus->dev); } @@ -346,6 +401,18 @@ void mdiobus_free(struct mii_bus *bus) } EXPORT_SYMBOL(mdiobus_free); +/** + * mdiobus_scan - scan a bus for MDIO devices. + * @bus: mii_bus to scan + * @addr: address on bus to scan + * + * This function scans the MDIO bus, looking for devices which can be + * identified using a vendor/product ID in registers 2 and 3. Not all + * MDIO devices have such registers, but PHY devices typically + * do. Hence this function assumes anything found is a PHY, or can be + * treated as a PHY.
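The unwind loops in __mdiobus_register() and mdiobus_unregister() above tear devices down through mdiodev->device_remove() and mdiodev->device_free(), keeping the bus core agnostic about device classes. How the PHY side wires those hooks is not part of these hunks; presumably something along these lines, where the phy_mdio_device_* names are assumptions:

/* Sketch (assumed PHY-core side, not shown in these hunks): each device
 * class routes the generic hooks to its own teardown helpers, so the
 * loops above need no class-specific knowledge.
 */
phydev->mdio.device_free = phy_mdio_device_free;     /* would wrap phy_device_free() */
phydev->mdio.device_remove = phy_mdio_device_remove; /* would wrap phy_device_remove() */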
Other MDIO devices, such as switches, will + * probably not be found during the scan. + */ struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr) { struct phy_device *phydev; @@ -359,7 +426,7 @@ struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr) * For DT, see if the auto-probed phy has a corresponding child * in the bus node, and set the of_node pointer in this case. */ - of_mdiobus_link_phydev(bus, phydev); + of_mdiobus_link_mdiodev(bus, &phydev->mdio); err = phy_device_register(phydev); if (err) { @@ -476,133 +543,56 @@ int mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val) EXPORT_SYMBOL(mdiobus_write); /** - * mdio_bus_match - determine if given PHY driver supports the given PHY device - * @dev: target PHY device - * @drv: given PHY driver + * mdio_bus_match - determine if given MDIO driver supports the given + * MDIO device + * @dev: target MDIO device + * @drv: given MDIO driver * - * Description: Given a PHY device, and a PHY driver, return 1 if - * the driver supports the device. Otherwise, return 0. + * Description: Given an MDIO device and an MDIO driver, return 1 if + * the driver supports the device. Otherwise, return 0. This may + * require calling the device's own match function, since different classes + * of MDIO devices have different match criteria. */ static int mdio_bus_match(struct device *dev, struct device_driver *drv) { - struct phy_device *phydev = to_phy_device(dev); - struct phy_driver *phydrv = to_phy_driver(drv); - const int num_ids = ARRAY_SIZE(phydev->c45_ids.device_ids); - int i; + struct mdio_device *mdio = to_mdio_device(dev); if (of_driver_match_device(dev, drv)) return 1; - if (phydrv->match_phy_device) - return phydrv->match_phy_device(phydev); - - if (phydev->is_c45) { - for (i = 1; i < num_ids; i++) { - if (!(phydev->c45_ids.devices_in_package & (1 << i))) - continue; + if (mdio->bus_match) + return mdio->bus_match(dev, drv); - if ((phydrv->phy_id & phydrv->phy_id_mask) == - (phydev->c45_ids.device_ids[i] & - phydrv->phy_id_mask)) - return 1; - } - return 0; - } else { - return (phydrv->phy_id & phydrv->phy_id_mask) == - (phydev->phy_id & phydrv->phy_id_mask); - } + return 0; } #ifdef CONFIG_PM - -static bool mdio_bus_phy_may_suspend(struct phy_device *phydev) -{ - struct device_driver *drv = phydev->dev.driver; - struct phy_driver *phydrv = to_phy_driver(drv); - struct net_device *netdev = phydev->attached_dev; - - if (!drv || !phydrv->suspend) - return false; - - /* PHY not attached? May suspend if the PHY has not already been - * suspended as part of a prior call to phy_disconnect() -> - * phy_detach() -> phy_suspend() because the parent netdev might be the - * MDIO bus driver and clock gated at this point. - */ - if (!netdev) - return !phydev->suspended; - - /* Don't suspend PHY if the attched netdev parent may wakeup. - * The parent may point to a PCI device, as in tg3 driver. - */ - if (netdev->dev.parent && device_may_wakeup(netdev->dev.parent)) - return false; - - /* Also don't suspend PHY if the netdev itself may wakeup. This - * is the case for devices w/o underlaying pwr. mgmt. aware bus, - * e.g. SoC devices. - */ - if (device_may_wakeup(&netdev->dev)) - return false; - - return true; -} - static int mdio_bus_suspend(struct device *dev) { - struct phy_device *phydev = to_phy_device(dev); + struct mdio_device *mdio = to_mdio_device(dev); - /* We must stop the state machine manually, otherwise it stops out of - * control, possibly with the phydev->lock held.
Upon resume, netdev - * may call phy routines that try to grab the same lock, and that may - * lead to a deadlock. - */ - if (phydev->attached_dev && phydev->adjust_link) - phy_stop_machine(phydev); + if (mdio->pm_ops && mdio->pm_ops->suspend) + return mdio->pm_ops->suspend(dev); - if (!mdio_bus_phy_may_suspend(phydev)) - return 0; - - return phy_suspend(phydev); + return 0; } static int mdio_bus_resume(struct device *dev) { - struct phy_device *phydev = to_phy_device(dev); - int ret; - - if (!mdio_bus_phy_may_suspend(phydev)) - goto no_resume; - - ret = phy_resume(phydev); - if (ret < 0) - return ret; + struct mdio_device *mdio = to_mdio_device(dev); -no_resume: - if (phydev->attached_dev && phydev->adjust_link) - phy_start_machine(phydev); + if (mdio->pm_ops && mdio->pm_ops->resume) + return mdio->pm_ops->resume(dev); return 0; } static int mdio_bus_restore(struct device *dev) { - struct phy_device *phydev = to_phy_device(dev); - struct net_device *netdev = phydev->attached_dev; - int ret; - - if (!netdev) - return 0; + struct mdio_device *mdio = to_mdio_device(dev); - ret = phy_init_hw(phydev); - if (ret < 0) - return ret; - - /* The PHY needs to renegotiate. */ - phydev->link = 0; - phydev->state = PHY_UP; - - phy_start_machine(phydev); + if (mdio->pm_ops && mdio->pm_ops->restore) + return mdio->pm_ops->restore(dev); return 0; } @@ -623,52 +613,10 @@ static const struct dev_pm_ops mdio_bus_pm_ops = { #endif /* CONFIG_PM */ -static ssize_t -phy_id_show(struct device *dev, struct device_attribute *attr, char *buf) -{ - struct phy_device *phydev = to_phy_device(dev); - - return sprintf(buf, "0x%.8lx\n", (unsigned long)phydev->phy_id); -} -static DEVICE_ATTR_RO(phy_id); - -static ssize_t -phy_interface_show(struct device *dev, struct device_attribute *attr, char *buf) -{ - struct phy_device *phydev = to_phy_device(dev); - const char *mode = NULL; - - if (phy_is_internal(phydev)) - mode = "internal"; - else - mode = phy_modes(phydev->interface); - - return sprintf(buf, "%s\n", mode); -} -static DEVICE_ATTR_RO(phy_interface); - -static ssize_t -phy_has_fixups_show(struct device *dev, struct device_attribute *attr, char *buf) -{ - struct phy_device *phydev = to_phy_device(dev); - - return sprintf(buf, "%d\n", phydev->has_fixups); -} -static DEVICE_ATTR_RO(phy_has_fixups); - -static struct attribute *mdio_dev_attrs[] = { - &dev_attr_phy_id.attr, - &dev_attr_phy_interface.attr, - &dev_attr_phy_has_fixups.attr, - NULL, -}; -ATTRIBUTE_GROUPS(mdio_dev); - struct bus_type mdio_bus_type = { .name = "mdio_bus", .match = mdio_bus_match, .pm = MDIO_BUS_PM_OPS, - .dev_groups = mdio_dev_groups, }; EXPORT_SYMBOL(mdio_bus_type); diff --git a/drivers/net/phy/mdio_device.c b/drivers/net/phy/mdio_device.c new file mode 100644 index 000000000000..9c88e6749b9a --- /dev/null +++ b/drivers/net/phy/mdio_device.c @@ -0,0 +1,171 @@ +/* Framework for MDIO devices, other than PHYs. + * + * Copyright (c) 2016 Andrew Lunn <andrew@lunn.ch> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ * + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/errno.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/mdio.h> +#include <linux/mii.h> +#include <linux/module.h> +#include <linux/phy.h> +#include <linux/slab.h> +#include <linux/string.h> +#include <linux/unistd.h> + +void mdio_device_free(struct mdio_device *mdiodev) +{ + put_device(&mdiodev->dev); +} +EXPORT_SYMBOL(mdio_device_free); + +static void mdio_device_release(struct device *dev) +{ + kfree(to_mdio_device(dev)); +} + +struct mdio_device *mdio_device_create(struct mii_bus *bus, int addr) +{ + struct mdio_device *mdiodev; + + /* We allocate the device, and initialize the default values */ + mdiodev = kzalloc(sizeof(*mdiodev), GFP_KERNEL); + if (!mdiodev) + return ERR_PTR(-ENOMEM); + + mdiodev->dev.release = mdio_device_release; + mdiodev->dev.parent = &bus->dev; + mdiodev->dev.bus = &mdio_bus_type; + mdiodev->device_free = mdio_device_free; + mdiodev->device_remove = mdio_device_remove; + mdiodev->bus = bus; + mdiodev->addr = addr; + + dev_set_name(&mdiodev->dev, PHY_ID_FMT, bus->id, addr); + + device_initialize(&mdiodev->dev); + + return mdiodev; +} +EXPORT_SYMBOL(mdio_device_create); + +/** + * mdio_device_register - Register the mdio device on the MDIO bus + * @mdiodev: mdio_device structure to be added to the MDIO bus + */ +int mdio_device_register(struct mdio_device *mdiodev) +{ + int err; + + dev_info(&mdiodev->dev, "mdio_device_register\n"); + + err = mdiobus_register_device(mdiodev); + if (err) + return err; + + err = device_add(&mdiodev->dev); + if (err) { + pr_err("MDIO %d failed to add\n", mdiodev->addr); + goto out; + } + + return 0; + + out: + mdiobus_unregister_device(mdiodev); + return err; +} +EXPORT_SYMBOL(mdio_device_register); + +/** + * mdio_device_remove - Remove a previously registered mdio device from the + * MDIO bus + * @mdiodev: mdio_device structure to remove + * + * This doesn't free the mdio_device itself, it merely reverses the effects + * of mdio_device_register(). Use mdio_device_free() to free the device + * after calling this function. + */ +void mdio_device_remove(struct mdio_device *mdiodev) +{ + device_del(&mdiodev->dev); + mdiobus_unregister_device(mdiodev); +} +EXPORT_SYMBOL(mdio_device_remove); + +/** + * mdio_probe - probe an MDIO device + * @dev: device to probe + * + * Description: Take care of setting up the mdio_device structure + * and calling the driver to probe the device. 
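Taken together, mdio_device_create() and mdio_device_register() above let a non-PHY device (an Ethernet switch, say) be placed on a bus roughly as in this sketch; the bus pointer and address 0x10 are invented for illustration:

static int example_attach_switch(struct mii_bus *bus)
{
	struct mdio_device *mdiodev;
	int err;

	mdiodev = mdio_device_create(bus, 0x10);	/* address is made up */
	if (IS_ERR(mdiodev))
		return PTR_ERR(mdiodev);

	err = mdio_device_register(mdiodev);
	if (err)
		mdio_device_free(mdiodev);	/* registration failed, drop it */

	return err;
}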
+ */ +static int mdio_probe(struct device *dev) +{ + struct mdio_device *mdiodev = to_mdio_device(dev); + struct device_driver *drv = mdiodev->dev.driver; + struct mdio_driver *mdiodrv = to_mdio_driver(drv); + int err = 0; + + if (mdiodrv->probe) + err = mdiodrv->probe(mdiodev); + + return err; +} + +static int mdio_remove(struct device *dev) +{ + struct mdio_device *mdiodev = to_mdio_device(dev); + struct device_driver *drv = mdiodev->dev.driver; + struct mdio_driver *mdiodrv = to_mdio_driver(drv); + + if (mdiodrv->remove) + mdiodrv->remove(mdiodev); + + return 0; +} + +/** + * mdio_driver_register - register an mdio_driver with the MDIO layer + * @new_driver: new mdio_driver to register + */ +int mdio_driver_register(struct mdio_driver *drv) +{ + struct mdio_driver_common *mdiodrv = &drv->mdiodrv; + int retval; + + pr_info("mdio_driver_register: %s\n", mdiodrv->driver.name); + + mdiodrv->driver.bus = &mdio_bus_type; + mdiodrv->driver.probe = mdio_probe; + mdiodrv->driver.remove = mdio_remove; + + retval = driver_register(&mdiodrv->driver); + if (retval) { + pr_err("%s: Error %d in registering driver\n", + mdiodrv->driver.name, retval); + + return retval; + } + + return 0; +} +EXPORT_SYMBOL(mdio_driver_register); + +void mdio_driver_unregister(struct mdio_driver *drv) +{ + struct mdio_driver_common *mdiodrv = &drv->mdiodrv; + + driver_unregister(&mdiodrv->driver); +} +EXPORT_SYMBOL(mdio_driver_unregister); diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index 1a6048a8c29d..03833dbfca67 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c @@ -224,7 +224,7 @@ static int kszphy_setup_led(struct phy_device *phydev, u32 reg, int val) rc = phy_write(phydev, reg, temp); out: if (rc < 0) - dev_err(&phydev->dev, "failed to set led mode\n"); + phydev_err(phydev, "failed to set led mode\n"); return rc; } @@ -243,7 +243,7 @@ static int kszphy_broadcast_disable(struct phy_device *phydev) ret = phy_write(phydev, MII_KSZPHY_OMSO, ret | KSZPHY_OMSO_B_CAST_OFF); out: if (ret) - dev_err(&phydev->dev, "failed to disable broadcast address\n"); + phydev_err(phydev, "failed to disable broadcast address\n"); return ret; } @@ -263,7 +263,7 @@ static int kszphy_nand_tree_disable(struct phy_device *phydev) ret & ~KSZPHY_OMSO_NAND_TREE_ON); out: if (ret) - dev_err(&phydev->dev, "failed to disable NAND tree mode\n"); + phydev_err(phydev, "failed to disable NAND tree mode\n"); return ret; } @@ -288,7 +288,8 @@ static int kszphy_config_init(struct phy_device *phydev) if (priv->rmii_ref_clk_sel) { ret = kszphy_rmii_clk_sel(phydev, priv->rmii_ref_clk_sel_val); if (ret) { - dev_err(&phydev->dev, "failed to set rmii reference clock\n"); + phydev_err(phydev, + "failed to set rmii reference clock\n"); return ret; } } @@ -349,7 +350,7 @@ static int ksz9021_load_values_from_of(struct phy_device *phydev, static int ksz9021_config_init(struct phy_device *phydev) { - const struct device *dev = &phydev->dev; + const struct device *dev = &phydev->mdio.dev; const struct device_node *of_node = dev->of_node; const struct device *dev_walker; @@ -357,7 +358,7 @@ static int ksz9021_config_init(struct phy_device *phydev) * properties in the MAC node. Walk up the tree of devices to * find a device with an OF node. 
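A matching driver, sketched from the registration path above: struct mdio_driver embeds an mdio_driver_common whose struct device_driver carries the name, and probe/remove take the mdio_device directly. The field spelling is inferred from drv->mdiodrv.driver as used by mdio_driver_register(); the exact struct layout lives in a header outside this diff, and example_sw_* are illustrative names:

static int example_sw_probe(struct mdio_device *mdiodev)
{
	dev_info(&mdiodev->dev, "switch at addr %d\n", mdiodev->addr);
	return 0;
}

static void example_sw_remove(struct mdio_device *mdiodev)
{
	/* nothing to undo in this sketch */
}

static struct mdio_driver example_sw_driver = {
	.probe = example_sw_probe,
	.remove = example_sw_remove,
	.mdiodrv.driver = {
		.name = "example-sw",
	},
};
/* registered from module init via mdio_driver_register(&example_sw_driver) */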
*/ - dev_walker = &phydev->dev; + dev_walker = &phydev->mdio.dev; do { of_node = dev_walker->of_node; dev_walker = dev_walker->parent; @@ -470,7 +471,7 @@ static int ksz9031_center_flp_timing(struct phy_device *phydev) static int ksz9031_config_init(struct phy_device *phydev) { - const struct device *dev = &phydev->dev; + const struct device *dev = &phydev->mdio.dev; const struct device_node *of_node = dev->of_node; static const char *clk_skews[2] = {"rxc-skew-ps", "txc-skew-ps"}; static const char *rx_data_skews[4] = { @@ -482,9 +483,17 @@ static int ksz9031_config_init(struct phy_device *phydev) "txd2-skew-ps", "txd3-skew-ps" }; static const char *control_skews[2] = {"txen-skew-ps", "rxdv-skew-ps"}; + const struct device *dev_walker; - if (!of_node && dev->parent->of_node) - of_node = dev->parent->of_node; + /* The Micrel driver has a deprecated option to place phy OF + * properties in the MAC node. Walk up the tree of devices to + * find a device with an OF node. + */ + dev_walker = &phydev->mdio.dev; + do { + of_node = dev_walker->of_node; + dev_walker = dev_walker->parent; + } while (!of_node && dev_walker); if (of_node) { ksz9031_of_load_skew_values(phydev, of_node, @@ -629,12 +638,12 @@ static void kszphy_get_stats(struct phy_device *phydev, static int kszphy_probe(struct phy_device *phydev) { const struct kszphy_type *type = phydev->drv->driver_data; - const struct device_node *np = phydev->dev.of_node; + const struct device_node *np = phydev->mdio.dev.of_node; struct kszphy_priv *priv; struct clk *clk; int ret; - priv = devm_kzalloc(&phydev->dev, sizeof(*priv), GFP_KERNEL); + priv = devm_kzalloc(&phydev->mdio.dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; @@ -649,15 +658,15 @@ static int kszphy_probe(struct phy_device *phydev) priv->led_mode = -1; if (priv->led_mode > 3) { - dev_err(&phydev->dev, "invalid led mode: 0x%02x\n", - priv->led_mode); + phydev_err(phydev, "invalid led mode: 0x%02x\n", + priv->led_mode); priv->led_mode = -1; } } else { priv->led_mode = -1; } - clk = devm_clk_get(&phydev->dev, "rmii-ref"); + clk = devm_clk_get(&phydev->mdio.dev, "rmii-ref"); /* NOTE: clk may be NULL if building without CONFIG_HAVE_CLK */ if (!IS_ERR_OR_NULL(clk)) { unsigned long rate = clk_get_rate(clk); @@ -672,7 +681,8 @@ static int kszphy_probe(struct phy_device *phydev) } else if (rate > 49500000 && rate < 50500000) { priv->rmii_ref_clk_sel_val = !rmii_ref_clk_sel_25_mhz; } else { - dev_err(&phydev->dev, "Clock rate out of range: %ld\n", rate); + phydev_err(phydev, "Clock rate out of range: %ld\n", + rate); return -EINVAL; } } @@ -704,7 +714,6 @@ static struct phy_driver ksphy_driver[] = { .get_stats = kszphy_get_stats, .suspend = genphy_suspend, .resume = genphy_resume, - .driver = { .owner = THIS_MODULE,}, }, { .phy_id = PHY_ID_KSZ8021, .phy_id_mask = 0x00ffffff, @@ -724,7 +733,6 @@ static struct phy_driver ksphy_driver[] = { .get_stats = kszphy_get_stats, .suspend = genphy_suspend, .resume = genphy_resume, - .driver = { .owner = THIS_MODULE,}, }, { .phy_id = PHY_ID_KSZ8031, .phy_id_mask = 0x00ffffff, @@ -744,7 +752,6 @@ static struct phy_driver ksphy_driver[] = { .get_stats = kszphy_get_stats, .suspend = genphy_suspend, .resume = genphy_resume, - .driver = { .owner = THIS_MODULE,}, }, { .phy_id = PHY_ID_KSZ8041, .phy_id_mask = 0x00fffff0, @@ -764,7 +771,6 @@ static struct phy_driver ksphy_driver[] = { .get_stats = kszphy_get_stats, .suspend = genphy_suspend, .resume = genphy_resume, - .driver = { .owner = THIS_MODULE,}, }, { .phy_id = PHY_ID_KSZ8041RNLI, .phy_id_mask = 
0x00fffff0, @@ -784,7 +790,6 @@ static struct phy_driver ksphy_driver[] = { .get_stats = kszphy_get_stats, .suspend = genphy_suspend, .resume = genphy_resume, - .driver = { .owner = THIS_MODULE,}, }, { .phy_id = PHY_ID_KSZ8051, .phy_id_mask = 0x00fffff0, @@ -804,7 +809,6 @@ static struct phy_driver ksphy_driver[] = { .get_stats = kszphy_get_stats, .suspend = genphy_suspend, .resume = genphy_resume, - .driver = { .owner = THIS_MODULE,}, }, { .phy_id = PHY_ID_KSZ8001, .name = "Micrel KSZ8001 or KS8721", @@ -823,7 +827,6 @@ static struct phy_driver ksphy_driver[] = { .get_stats = kszphy_get_stats, .suspend = genphy_suspend, .resume = genphy_resume, - .driver = { .owner = THIS_MODULE,}, }, { .phy_id = PHY_ID_KSZ8081, .name = "Micrel KSZ8081 or KSZ8091", @@ -842,7 +845,6 @@ static struct phy_driver ksphy_driver[] = { .get_stats = kszphy_get_stats, .suspend = genphy_suspend, .resume = genphy_resume, - .driver = { .owner = THIS_MODULE,}, }, { .phy_id = PHY_ID_KSZ8061, .name = "Micrel KSZ8061", @@ -859,7 +861,6 @@ static struct phy_driver ksphy_driver[] = { .get_stats = kszphy_get_stats, .suspend = genphy_suspend, .resume = genphy_resume, - .driver = { .owner = THIS_MODULE,}, }, { .phy_id = PHY_ID_KSZ9021, .phy_id_mask = 0x000ffffe, @@ -879,7 +880,6 @@ static struct phy_driver ksphy_driver[] = { .resume = genphy_resume, .read_mmd_indirect = ksz9021_rd_mmd_phyreg, .write_mmd_indirect = ksz9021_wr_mmd_phyreg, - .driver = { .owner = THIS_MODULE, }, }, { .phy_id = PHY_ID_KSZ9031, .phy_id_mask = 0x00fffff0, @@ -897,7 +897,6 @@ static struct phy_driver ksphy_driver[] = { .get_stats = kszphy_get_stats, .suspend = genphy_suspend, .resume = genphy_resume, - .driver = { .owner = THIS_MODULE, }, }, { .phy_id = PHY_ID_KSZ8873MLL, .phy_id_mask = 0x00fffff0, @@ -912,7 +911,6 @@ static struct phy_driver ksphy_driver[] = { .get_stats = kszphy_get_stats, .suspend = genphy_suspend, .resume = genphy_resume, - .driver = { .owner = THIS_MODULE, }, }, { .phy_id = PHY_ID_KSZ886X, .phy_id_mask = 0x00fffff0, @@ -927,7 +925,6 @@ static struct phy_driver ksphy_driver[] = { .get_stats = kszphy_get_stats, .suspend = genphy_suspend, .resume = genphy_resume, - .driver = { .owner = THIS_MODULE, }, } }; module_phy_driver(ksphy_driver); diff --git a/drivers/net/phy/microchip.c b/drivers/net/phy/microchip.c index c0a20ebd083b..15f820648f82 100644 --- a/drivers/net/phy/microchip.c +++ b/drivers/net/phy/microchip.c @@ -68,7 +68,7 @@ int lan88xx_suspend(struct phy_device *phydev) static int lan88xx_probe(struct phy_device *phydev) { - struct device *dev = &phydev->dev; + struct device *dev = &phydev->mdio.dev; struct lan88xx_priv *priv; priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); @@ -78,10 +78,9 @@ static int lan88xx_probe(struct phy_device *phydev) priv->wolopts = 0; /* these values can be used to identify internal PHY */ - priv->chip_id = phy_read_mmd_indirect(phydev, LAN88XX_MMD3_CHIP_ID, - 3, phydev->addr); + priv->chip_id = phy_read_mmd_indirect(phydev, LAN88XX_MMD3_CHIP_ID, 3); priv->chip_rev = phy_read_mmd_indirect(phydev, LAN88XX_MMD3_CHIP_REV, - 3, phydev->addr); + 3); phydev->priv = priv; @@ -90,7 +89,7 @@ static int lan88xx_probe(struct phy_device *phydev) static void lan88xx_remove(struct phy_device *phydev) { - struct device *dev = &phydev->dev; + struct device *dev = &phydev->mdio.dev; struct lan88xx_priv *priv = phydev->priv; if (priv) @@ -130,8 +129,6 @@ static struct phy_driver microchip_phy_driver[] = { .suspend = lan88xx_suspend, .resume = genphy_resume, .set_wol = lan88xx_set_wol, - - .driver = { .owner = 
THIS_MODULE, } } }; module_phy_driver(microchip_phy_driver); diff --git a/drivers/net/phy/national.c b/drivers/net/phy/national.c index 0a7b9c7f09a2..2a1b490bc587 100644 --- a/drivers/net/phy/national.c +++ b/drivers/net/phy/national.c @@ -140,7 +140,6 @@ static struct phy_driver dp83865_driver[] = { { .read_status = genphy_read_status, .ack_interrupt = ns_ack_interrupt, .config_intr = ns_config_intr, - .driver = {.owner = THIS_MODULE,} } }; module_phy_driver(dp83865_driver); diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 47cd306dbb3c..5590b9c182c9 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -319,7 +319,7 @@ int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd) { u32 speed = ethtool_cmd_speed(cmd); - if (cmd->phy_address != phydev->addr) + if (cmd->phy_address != phydev->mdio.addr) return -EINVAL; /* We make sure that we don't pass unsupported values in to the PHY */ @@ -375,7 +375,7 @@ int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd) cmd->port = PORT_BNC; else cmd->port = PORT_MII; - cmd->phy_address = phydev->addr; + cmd->phy_address = phydev->mdio.addr; cmd->transceiver = phy_is_internal(phydev) ? XCVR_INTERNAL : XCVR_EXTERNAL; cmd->autoneg = phydev->autoneg; @@ -403,16 +403,17 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd) switch (cmd) { case SIOCGMIIPHY: - mii_data->phy_id = phydev->addr; + mii_data->phy_id = phydev->mdio.addr; /* fall through */ case SIOCGMIIREG: - mii_data->val_out = mdiobus_read(phydev->bus, mii_data->phy_id, + mii_data->val_out = mdiobus_read(phydev->mdio.bus, + mii_data->phy_id, mii_data->reg_num); return 0; case SIOCSMIIREG: - if (mii_data->phy_id == phydev->addr) { + if (mii_data->phy_id == phydev->mdio.addr) { switch (mii_data->reg_num) { case MII_BMCR: if ((val & (BMCR_RESET | BMCR_ANENABLE)) == 0) { @@ -445,10 +446,10 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd) } } - mdiobus_write(phydev->bus, mii_data->phy_id, + mdiobus_write(phydev->mdio.bus, mii_data->phy_id, mii_data->reg_num, val); - if (mii_data->phy_id == phydev->addr && + if (mii_data->phy_id == phydev->mdio.addr && mii_data->reg_num == MII_BMCR && val & BMCR_RESET) return phy_init_hw(phydev); @@ -643,7 +644,7 @@ int phy_start_interrupts(struct phy_device *phydev) if (request_irq(phydev->irq, phy_interrupt, 0, "phy_interrupt", phydev) < 0) { pr_warn("%s: Can't get IRQ %d (PHY)\n", - phydev->bus->name, phydev->irq); + phydev->mdio.bus->name, phydev->irq); phydev->irq = PHY_POLL; return 0; } @@ -691,25 +692,29 @@ void phy_change(struct work_struct *work) struct phy_device *phydev = container_of(work, struct phy_device, phy_queue); - if (phydev->drv->did_interrupt && - !phydev->drv->did_interrupt(phydev)) - goto ignore; + if (phy_interrupt_is_valid(phydev)) { + if (phydev->drv->did_interrupt && + !phydev->drv->did_interrupt(phydev)) + goto ignore; - if (phy_disable_interrupts(phydev)) - goto phy_err; + if (phy_disable_interrupts(phydev)) + goto phy_err; + } mutex_lock(&phydev->lock); if ((PHY_RUNNING == phydev->state) || (PHY_NOLINK == phydev->state)) phydev->state = PHY_CHANGELINK; mutex_unlock(&phydev->lock); - atomic_dec(&phydev->irq_disable); - enable_irq(phydev->irq); + if (phy_interrupt_is_valid(phydev)) { + atomic_dec(&phydev->irq_disable); + enable_irq(phydev->irq); - /* Reenable interrupts */ - if (PHY_HALTED != phydev->state && - phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED)) - goto irq_enable_err; + /* Reenable interrupts */ + if (PHY_HALTED != 
phydev->state && + phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED)) + goto irq_enable_err; + } /* reschedule state queue work to run as soon as possible */ cancel_delayed_work_sync(&phydev->state_queue); @@ -904,10 +909,10 @@ void phy_state_machine(struct work_struct *work) phydev->adjust_link(phydev->attached_dev); break; case PHY_RUNNING: - /* Only register a CHANGE if we are polling or ignoring - * interrupts and link changed since latest checking. + /* Only register a CHANGE if we are polling and link changed + * since the last check. */ - if (!phy_interrupt_is_valid(phydev)) { + if (phydev->irq == PHY_POLL) { old_link = phydev->link; err = phy_read_status(phydev); if (err) @@ -995,18 +1000,25 @@ void phy_state_machine(struct work_struct *work) if (err < 0) phy_error(phydev); - dev_dbg(&phydev->dev, "PHY state change %s -> %s\n", - phy_state_to_str(old_state), phy_state_to_str(phydev->state)); + phydev_dbg(phydev, "PHY state change %s -> %s\n", + phy_state_to_str(old_state), + phy_state_to_str(phydev->state)); - queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, - PHY_STATE_TIME * HZ); + /* Only re-schedule a PHY state machine change if we are polling the + * PHY. If PHY_IGNORE_INTERRUPT is set, then we will be moving + * between states from phy_mac_interrupt() + */ + if (phydev->irq == PHY_POLL) + queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, + PHY_STATE_TIME * HZ); } void phy_mac_interrupt(struct phy_device *phydev, int new_link) { - cancel_work_sync(&phydev->phy_queue); phydev->link = new_link; - schedule_work(&phydev->phy_queue); + + /* Trigger a state machine change */ + queue_work(system_power_efficient_wq, &phydev->phy_queue); } EXPORT_SYMBOL(phy_mac_interrupt); @@ -1028,7 +1040,6 @@ static inline void mmd_phy_indirect(struct mii_bus *bus, int prtad, int devad, * @phydev: The PHY device * @prtad: MMD Address * @devad: MMD DEVAD - * @addr: PHY address on the MII bus * * Description: it reads data from the MMD registers (clause 22 to access clause 45) of the specified phy address.
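The kernel-doc above describes clause-22 indirect access to clause-45 MMD registers through MII registers 13 and 14; the hunks below only drop the explicit addr argument, which now comes from the embedded mdio_device (phydev->mdio.addr). A standalone sketch of the access sequence, assuming only the standard MII_MMD_* constants from <linux/mii.h> (the toy register file has no real MMD behind it, so the final read just returns the latched register number; on hardware the PHY substitutes the MMD data):

#include <stdio.h>
#include <stdint.h>

#define MII_MMD_CTRL        0x0d    /* clause-22 MMD access control register */
#define MII_MMD_DATA        0x0e    /* clause-22 MMD access data register */
#define MII_MMD_CTRL_NOINCR 0x4000  /* data mode, no post-read increment */

static uint16_t mii[32];            /* toy clause-22 register file */

static void     c22_write(int reg, uint16_t val) { mii[reg] = val; }
static uint16_t c22_read(int reg)                { return mii[reg]; }

/* The reg 13/reg 14 sequence from the kernel-doc above: select the MMD,
 * latch the register number, switch to data mode, then read the data. */
static uint16_t mmd_read(int devad, uint16_t regnum)
{
	c22_write(MII_MMD_CTRL, devad);                        /* 1) DEVAD */
	c22_write(MII_MMD_DATA, regnum);                       /* 2) MMD address */
	c22_write(MII_MMD_CTRL, devad | MII_MMD_CTRL_NOINCR);  /* 3) data command */
	return c22_read(MII_MMD_DATA);                         /* 4) read data */
}

int main(void)
{
	printf("0x%04x\n", (unsigned)mmd_read(3 /* MDIO_MMD_PCS */, 20));
	return 0;
}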
@@ -1038,14 +1049,14 @@ static inline void mmd_phy_indirect(struct mii_bus *bus, int prtad, int devad, * 3) Write reg 13 // MMD Data Command for MMD DEVAD * 4) Read reg 14 // Read MMD data */ -int phy_read_mmd_indirect(struct phy_device *phydev, int prtad, - int devad, int addr) +int phy_read_mmd_indirect(struct phy_device *phydev, int prtad, int devad) { struct phy_driver *phydrv = phydev->drv; + int addr = phydev->mdio.addr; int value = -1; if (!phydrv->read_mmd_indirect) { - struct mii_bus *bus = phydev->bus; + struct mii_bus *bus = phydev->mdio.bus; mutex_lock(&bus->mdio_lock); mmd_phy_indirect(bus, prtad, devad, addr); @@ -1065,7 +1076,6 @@ EXPORT_SYMBOL(phy_read_mmd_indirect); * @phydev: The PHY device * @prtad: MMD Address * @devad: MMD DEVAD - * @addr: PHY address on the MII bus * @data: data to write in the MMD register * * Description: Write data from the MMD registers of the specified @@ -1077,12 +1087,13 @@ EXPORT_SYMBOL(phy_read_mmd_indirect); * 4) Write reg 14 // Write MMD data */ void phy_write_mmd_indirect(struct phy_device *phydev, int prtad, - int devad, int addr, u32 data) + int devad, u32 data) { struct phy_driver *phydrv = phydev->drv; + int addr = phydev->mdio.addr; if (!phydrv->write_mmd_indirect) { - struct mii_bus *bus = phydev->bus; + struct mii_bus *bus = phydev->mdio.bus; mutex_lock(&bus->mdio_lock); mmd_phy_indirect(bus, prtad, devad, addr); @@ -1129,7 +1140,7 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable) /* First check if the EEE ability is supported */ eee_cap = phy_read_mmd_indirect(phydev, MDIO_PCS_EEE_ABLE, - MDIO_MMD_PCS, phydev->addr); + MDIO_MMD_PCS); if (eee_cap <= 0) goto eee_exit_err; @@ -1141,12 +1152,12 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable) * the EEE advertising registers. */ eee_lp = phy_read_mmd_indirect(phydev, MDIO_AN_EEE_LPABLE, - MDIO_MMD_AN, phydev->addr); + MDIO_MMD_AN); if (eee_lp <= 0) goto eee_exit_err; eee_adv = phy_read_mmd_indirect(phydev, MDIO_AN_EEE_ADV, - MDIO_MMD_AN, phydev->addr); + MDIO_MMD_AN); if (eee_adv <= 0) goto eee_exit_err; @@ -1160,15 +1171,13 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable) * clock while it is signaling LPI.
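The three MMD reads above feed one decision in phy_init_eee(): EEE is usable for the negotiated mode only if the mode appears in both the local advertisement (MDIO_AN_EEE_ADV) and the link partner's (MDIO_AN_EEE_LPABLE). The two registers share one bit layout, so the check reduces to a mask intersection. A standalone sketch with the real uapi bit values and hypothetical register contents:

#include <stdio.h>
#include <stdint.h>

#define MDIO_EEE_100TX 0x0002  /* 100BASE-TX EEE advertised */
#define MDIO_EEE_1000T 0x0004  /* 1000BASE-T EEE advertised */

/* EEE may be enabled for a mode only if both ends advertise it. */
static int eee_usable(uint16_t adv, uint16_t lp, uint16_t mode)
{
	return (adv & lp & mode) != 0;
}

int main(void)
{
	uint16_t adv = MDIO_EEE_100TX | MDIO_EEE_1000T; /* hypothetical local adv */
	uint16_t lp  = MDIO_EEE_1000T;                  /* hypothetical partner */

	printf("EEE at 1000T: %d\n", eee_usable(adv, lp, MDIO_EEE_1000T)); /* 1 */
	printf("EEE at 100TX: %d\n", eee_usable(adv, lp, MDIO_EEE_100TX)); /* 0 */
	return 0;
}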
*/ int val = phy_read_mmd_indirect(phydev, MDIO_CTRL1, - MDIO_MMD_PCS, - phydev->addr); + MDIO_MMD_PCS); if (val < 0) return val; val |= MDIO_PCS_CTRL1_CLKSTOP_EN; phy_write_mmd_indirect(phydev, MDIO_CTRL1, - MDIO_MMD_PCS, phydev->addr, - val); + MDIO_MMD_PCS, val); } return 0; /* EEE supported */ @@ -1187,8 +1196,7 @@ EXPORT_SYMBOL(phy_init_eee); */ int phy_get_eee_err(struct phy_device *phydev) { - return phy_read_mmd_indirect(phydev, MDIO_PCS_EEE_WK_ERR, - MDIO_MMD_PCS, phydev->addr); + return phy_read_mmd_indirect(phydev, MDIO_PCS_EEE_WK_ERR, MDIO_MMD_PCS); } EXPORT_SYMBOL(phy_get_eee_err); @@ -1205,22 +1213,19 @@ int phy_ethtool_get_eee(struct phy_device *phydev, struct ethtool_eee *data) int val; /* Get Supported EEE */ - val = phy_read_mmd_indirect(phydev, MDIO_PCS_EEE_ABLE, - MDIO_MMD_PCS, phydev->addr); + val = phy_read_mmd_indirect(phydev, MDIO_PCS_EEE_ABLE, MDIO_MMD_PCS); if (val < 0) return val; data->supported = mmd_eee_cap_to_ethtool_sup_t(val); /* Get advertisement EEE */ - val = phy_read_mmd_indirect(phydev, MDIO_AN_EEE_ADV, - MDIO_MMD_AN, phydev->addr); + val = phy_read_mmd_indirect(phydev, MDIO_AN_EEE_ADV, MDIO_MMD_AN); if (val < 0) return val; data->advertised = mmd_eee_adv_to_ethtool_adv_t(val); /* Get LP advertisement EEE */ - val = phy_read_mmd_indirect(phydev, MDIO_AN_EEE_LPABLE, - MDIO_MMD_AN, phydev->addr); + val = phy_read_mmd_indirect(phydev, MDIO_AN_EEE_LPABLE, MDIO_MMD_AN); if (val < 0) return val; data->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val); @@ -1240,8 +1245,7 @@ int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_eee *data) { int val = ethtool_adv_to_mmd_eee_adv_t(data->advertised); - phy_write_mmd_indirect(phydev, MDIO_AN_EEE_ADV, MDIO_MMD_AN, - phydev->addr, val); + phy_write_mmd_indirect(phydev, MDIO_AN_EEE_ADV, MDIO_MMD_AN, val); return 0; } diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 0bfbabad4431..bad3f005faee 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -43,15 +43,31 @@ MODULE_LICENSE("GPL"); void phy_device_free(struct phy_device *phydev) { - put_device(&phydev->dev); + put_device(&phydev->mdio.dev); } EXPORT_SYMBOL(phy_device_free); +static void phy_mdio_device_free(struct mdio_device *mdiodev) +{ + struct phy_device *phydev; + + phydev = container_of(mdiodev, struct phy_device, mdio); + phy_device_free(phydev); +} + static void phy_device_release(struct device *dev) { kfree(to_phy_device(dev)); } +static void phy_mdio_device_remove(struct mdio_device *mdiodev) +{ + struct phy_device *phydev; + + phydev = container_of(mdiodev, struct phy_device, mdio); + phy_device_remove(phydev); +} + enum genphy_driver { GENPHY_DRV_1G, GENPHY_DRV_10G, @@ -63,9 +79,118 @@ static struct phy_driver genphy_driver[GENPHY_DRV_MAX]; static LIST_HEAD(phy_fixup_list); static DEFINE_MUTEX(phy_fixup_lock); +#ifdef CONFIG_PM +static bool mdio_bus_phy_may_suspend(struct phy_device *phydev) +{ + struct device_driver *drv = phydev->mdio.dev.driver; + struct phy_driver *phydrv = to_phy_driver(drv); + struct net_device *netdev = phydev->attached_dev; + + if (!drv || !phydrv->suspend) + return false; + + /* PHY not attached? May suspend if the PHY has not already been + * suspended as part of a prior call to phy_disconnect() -> + * phy_detach() -> phy_suspend() because the parent netdev might be the + * MDIO bus driver and clock gated at this point. + */ + if (!netdev) + return !phydev->suspended; + + /* Don't suspend PHY if the attached netdev parent may wakeup. 
+ * The parent may point to a PCI device, as in tg3 driver. + */ + if (netdev->dev.parent && device_may_wakeup(netdev->dev.parent)) + return false; + + /* Also don't suspend PHY if the netdev itself may wakeup. This + * is the case for devices w/o underlying pwr. mgmt. aware bus, + * e.g. SoC devices. + */ + if (device_may_wakeup(&netdev->dev)) + return false; + + return true; +} + +static int mdio_bus_phy_suspend(struct device *dev) +{ + struct phy_device *phydev = to_phy_device(dev); + + /* We must stop the state machine manually, otherwise it stops out of + * control, possibly with the phydev->lock held. Upon resume, netdev + * may call phy routines that try to grab the same lock, and that may + * lead to a deadlock. + */ + if (phydev->attached_dev && phydev->adjust_link) + phy_stop_machine(phydev); + + if (!mdio_bus_phy_may_suspend(phydev)) + return 0; + + return phy_suspend(phydev); +} + +static int mdio_bus_phy_resume(struct device *dev) +{ + struct phy_device *phydev = to_phy_device(dev); + int ret; + + if (!mdio_bus_phy_may_suspend(phydev)) + goto no_resume; + + ret = phy_resume(phydev); + if (ret < 0) + return ret; + +no_resume: + if (phydev->attached_dev && phydev->adjust_link) + phy_start_machine(phydev); + + return 0; +} + +static int mdio_bus_phy_restore(struct device *dev) +{ + struct phy_device *phydev = to_phy_device(dev); + struct net_device *netdev = phydev->attached_dev; + int ret; + + if (!netdev) + return 0; + + ret = phy_init_hw(phydev); + if (ret < 0) + return ret; + + /* The PHY needs to renegotiate. */ + phydev->link = 0; + phydev->state = PHY_UP; + + phy_start_machine(phydev); + + return 0; +} + +static const struct dev_pm_ops mdio_bus_phy_pm_ops = { + .suspend = mdio_bus_phy_suspend, + .resume = mdio_bus_phy_resume, + .freeze = mdio_bus_phy_suspend, + .thaw = mdio_bus_phy_resume, + .restore = mdio_bus_phy_restore, +}; + +#define MDIO_BUS_PHY_PM_OPS (&mdio_bus_phy_pm_ops) + +#else + +#define MDIO_BUS_PHY_PM_OPS NULL + +#endif /* CONFIG_PM */ + /** * phy_register_fixup - creates a new phy_fixup and adds it to the list - * @bus_id: A string which matches phydev->dev.bus_id (or PHY_ANY_ID) + * @bus_id: A string which matches phydev->mdio.dev.bus_id (or PHY_ANY_ID) * @phy_uid: Used to match against phydev->phy_id (the UID of the PHY) * It can also be PHY_ANY_UID * @phy_uid_mask: Applied to phydev->phy_id and fixup->phy_uid before @@ -114,7 +239,7 @@ EXPORT_SYMBOL(phy_register_fixup_for_id); */ static int phy_needs_fixup(struct phy_device *phydev, struct phy_fixup *fixup) { - if (strcmp(fixup->bus_id, dev_name(&phydev->dev)) != 0) + if (strcmp(fixup->bus_id, phydev_name(phydev)) != 0) if (strcmp(fixup->bus_id, PHY_ANY_ID) != 0) return 0; @@ -148,18 +273,59 @@ static int phy_scan_fixups(struct phy_device *phydev) return 0; } +static int phy_bus_match(struct device *dev, struct device_driver *drv) +{ + struct phy_device *phydev = to_phy_device(dev); + struct phy_driver *phydrv = to_phy_driver(drv); + const int num_ids = ARRAY_SIZE(phydev->c45_ids.device_ids); + int i; + + if (!(phydrv->mdiodrv.flags & MDIO_DEVICE_IS_PHY)) + return 0; + + if (phydrv->match_phy_device) + return phydrv->match_phy_device(phydev); + + if (phydev->is_c45) { + for (i = 1; i < num_ids; i++) { + if (!(phydev->c45_ids.devices_in_package & (1 << i))) + continue; + + if ((phydrv->phy_id & phydrv->phy_id_mask) == + (phydev->c45_ids.device_ids[i] & + phydrv->phy_id_mask)) + return 1; + } + return 0; + } else { + return (phydrv->phy_id & phydrv->phy_id_mask) == + (phydev->phy_id &
phydrv->phy_id_mask); + } +} + struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id, bool is_c45, struct phy_c45_device_ids *c45_ids) { struct phy_device *dev; + struct mdio_device *mdiodev; /* We allocate the device, and initialize the default values */ dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (!dev) return ERR_PTR(-ENOMEM); - dev->dev.release = phy_device_release; + mdiodev = &dev->mdio; + mdiodev->dev.release = phy_device_release; + mdiodev->dev.parent = &bus->dev; + mdiodev->dev.bus = &mdio_bus_type; + mdiodev->bus = bus; + mdiodev->pm_ops = MDIO_BUS_PHY_PM_OPS; + mdiodev->bus_match = phy_bus_match; + mdiodev->addr = addr; + mdiodev->flags = MDIO_DEVICE_FLAG_PHY; + mdiodev->device_free = phy_mdio_device_free; + mdiodev->device_remove = phy_mdio_device_remove; dev->speed = 0; dev->duplex = -1; @@ -171,15 +337,11 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id, dev->autoneg = AUTONEG_ENABLE; dev->is_c45 = is_c45; - dev->addr = addr; dev->phy_id = phy_id; if (c45_ids) dev->c45_ids = *c45_ids; - dev->bus = bus; - dev->dev.parent = &bus->dev; - dev->dev.bus = &mdio_bus_type; - dev->irq = bus->irq ? bus->irq[addr] : PHY_POLL; - dev_set_name(&dev->dev, PHY_ID_FMT, bus->id, addr); + dev->irq = bus->irq[addr]; + dev_set_name(&mdiodev->dev, PHY_ID_FMT, bus->id, addr); dev->state = PHY_DOWN; @@ -199,7 +361,7 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id, */ request_module(MDIO_MODULE_PREFIX MDIO_ID_FMT, MDIO_ID_ARGS(phy_id)); - device_initialize(&dev->dev); + device_initialize(&mdiodev->dev); return dev; } @@ -373,6 +535,48 @@ struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45) } EXPORT_SYMBOL(get_phy_device); +static ssize_t +phy_id_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct phy_device *phydev = to_phy_device(dev); + + return sprintf(buf, "0x%.8lx\n", (unsigned long)phydev->phy_id); +} +static DEVICE_ATTR_RO(phy_id); + +static ssize_t +phy_interface_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct phy_device *phydev = to_phy_device(dev); + const char *mode = NULL; + + if (phy_is_internal(phydev)) + mode = "internal"; + else + mode = phy_modes(phydev->interface); + + return sprintf(buf, "%s\n", mode); +} +static DEVICE_ATTR_RO(phy_interface); + +static ssize_t +phy_has_fixups_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct phy_device *phydev = to_phy_device(dev); + + return sprintf(buf, "%d\n", phydev->has_fixups); +} +static DEVICE_ATTR_RO(phy_has_fixups); + +static struct attribute *phy_dev_attrs[] = { + &dev_attr_phy_id.attr, + &dev_attr_phy_interface.attr, + &dev_attr_phy_has_fixups.attr, + NULL, +}; +ATTRIBUTE_GROUPS(phy_dev); + /** * phy_device_register - Register the phy device on the MDIO bus * @phydev: phy_device structure to be added to the MDIO bus @@ -381,28 +585,29 @@ int phy_device_register(struct phy_device *phydev) { int err; - /* Don't register a phy if one is already registered at this address */ - if (phydev->bus->phy_map[phydev->addr]) - return -EINVAL; - phydev->bus->phy_map[phydev->addr] = phydev; + err = mdiobus_register_device(&phydev->mdio); + if (err) + return err; /* Run all of the fixups for this PHY */ err = phy_scan_fixups(phydev); if (err) { - pr_err("PHY %d failed to initialize\n", phydev->addr); + pr_err("PHY %d failed to initialize\n", phydev->mdio.addr); goto out; } - err = device_add(&phydev->dev); + phydev->mdio.dev.groups = phy_dev_groups; 
+ + err = device_add(&phydev->mdio.dev); if (err) { - pr_err("PHY %d failed to add\n", phydev->addr); + pr_err("PHY %d failed to add\n", phydev->mdio.addr); goto out; } return 0; out: - phydev->bus->phy_map[phydev->addr] = NULL; + mdiobus_unregister_device(&phydev->mdio); return err; } EXPORT_SYMBOL(phy_device_register); @@ -417,11 +622,8 @@ EXPORT_SYMBOL(phy_device_register); */ void phy_device_remove(struct phy_device *phydev) { - struct mii_bus *bus = phydev->bus; - int addr = phydev->addr; - - device_del(&phydev->dev); - bus->phy_map[addr] = NULL; + device_del(&phydev->mdio.dev); + mdiobus_unregister_device(&phydev->mdio); } EXPORT_SYMBOL(phy_device_remove); @@ -431,11 +633,13 @@ EXPORT_SYMBOL(phy_device_remove); */ struct phy_device *phy_find_first(struct mii_bus *bus) { + struct phy_device *phydev; int addr; for (addr = 0; addr < PHY_MAX_ADDR; addr++) { - if (bus->phy_map[addr]) - return bus->phy_map[addr]; + phydev = mdiobus_get_phy(bus, addr); + if (phydev) + return phydev; } return NULL; } @@ -607,6 +811,33 @@ int phy_init_hw(struct phy_device *phydev) } EXPORT_SYMBOL(phy_init_hw); +void phy_attached_info(struct phy_device *phydev) +{ + phy_attached_print(phydev, NULL); +} +EXPORT_SYMBOL(phy_attached_info); + +#define ATTACHED_FMT "attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)" +void phy_attached_print(struct phy_device *phydev, const char *fmt, ...) +{ + if (!fmt) { + dev_info(&phydev->mdio.dev, ATTACHED_FMT "\n", + phydev->drv->name, phydev_name(phydev), + phydev->irq); + } else { + va_list ap; + + dev_info(&phydev->mdio.dev, ATTACHED_FMT, + phydev->drv->name, phydev_name(phydev), + phydev->irq); + + va_start(ap, fmt); + vprintk(fmt, ap); + va_end(ap); + } +} +EXPORT_SYMBOL(phy_attached_print); + /** * phy_attach_direct - attach a network device to a given PHY device pointer * @dev: network device to attach @@ -625,8 +856,8 @@ EXPORT_SYMBOL(phy_init_hw); int phy_attach_direct(struct net_device *dev, struct phy_device *phydev, u32 flags, phy_interface_t interface) { - struct mii_bus *bus = phydev->bus; - struct device *d = &phydev->dev; + struct mii_bus *bus = phydev->mdio.bus; + struct device *d = &phydev->mdio.dev; int err; if (!try_module_get(bus->owner)) { @@ -641,9 +872,11 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev, */ if (!d->driver) { if (phydev->is_c45) - d->driver = &genphy_driver[GENPHY_DRV_10G].driver; + d->driver = + &genphy_driver[GENPHY_DRV_10G].mdiodrv.driver; else - d->driver = &genphy_driver[GENPHY_DRV_1G].driver; + d->driver = + &genphy_driver[GENPHY_DRV_1G].mdiodrv.driver; err = d->driver->probe(d); if (err >= 0) @@ -668,6 +901,11 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev, phydev->state = PHY_READY; + /* Initial carrier state is off as the phy is about to be + * (re)initialized. + */ + netif_carrier_off(phydev->attached_dev); + /* Do initial configuration here, now that * we have certain key parameters * (dev_flags and interface) @@ -744,8 +982,9 @@ void phy_detach(struct phy_device *phydev) * real driver could be loaded */ for (i = 0; i < ARRAY_SIZE(genphy_driver); i++) { - if (phydev->dev.driver == &genphy_driver[i].driver) { - device_release_driver(&phydev->dev); + if (phydev->mdio.dev.driver == + &genphy_driver[i].mdiodrv.driver) { + device_release_driver(&phydev->mdio.dev); break; } } @@ -754,16 +993,16 @@ void phy_detach(struct phy_device *phydev) * The phydev might go away on the put_device() below, so avoid * a use-after-free bug by reading the underlying bus first. 
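The comment above states a general teardown rule: copy any field you still need out of a refcounted object before dropping what may be its last reference. A minimal standalone illustration, with free() and a counter standing in for put_device() and module_put():

#include <stdlib.h>

struct bus { int refcnt; };       /* stands in for the mii_bus owner module */
struct phy { struct bus *bus; };  /* stands in for phy_device */

static void detach(struct phy *p)
{
	struct bus *bus = p->bus; /* read first: the put below may free p */

	free(p);                  /* put_device(&phydev->mdio.dev) analogue */
	bus->refcnt--;            /* module_put(bus->owner) via the local copy */
}

int main(void)
{
	struct bus b = { .refcnt = 1 };
	struct phy *p = malloc(sizeof(*p));

	if (!p)
		return 1;
	p->bus = &b;
	detach(p);
	return b.refcnt; /* 0: the reference was dropped exactly once */
}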
*/ - bus = phydev->bus; + bus = phydev->mdio.bus; - put_device(&phydev->dev); + put_device(&phydev->mdio.dev); module_put(bus->owner); } EXPORT_SYMBOL(phy_detach); int phy_suspend(struct phy_device *phydev) { - struct phy_driver *phydrv = to_phy_driver(phydev->dev.driver); + struct phy_driver *phydrv = to_phy_driver(phydev->mdio.dev.driver); struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL }; int ret = 0; @@ -786,7 +1025,7 @@ EXPORT_SYMBOL(phy_suspend); int phy_resume(struct phy_device *phydev) { - struct phy_driver *phydrv = to_phy_driver(phydev->dev.driver); + struct phy_driver *phydrv = to_phy_driver(phydev->mdio.dev.driver); int ret = 0; if (phydrv->resume) @@ -1303,7 +1542,7 @@ EXPORT_SYMBOL(phy_set_max_speed); static void of_set_phy_supported(struct phy_device *phydev) { - struct device_node *node = phydev->dev.of_node; + struct device_node *node = phydev->mdio.dev.of_node; u32 max_speed; if (!IS_ENABLED(CONFIG_OF_MDIO)) @@ -1327,7 +1566,7 @@ static void of_set_phy_supported(struct phy_device *phydev) static int phy_probe(struct device *dev) { struct phy_device *phydev = to_phy_device(dev); - struct device_driver *drv = phydev->dev.driver; + struct device_driver *drv = phydev->mdio.dev.driver; struct phy_driver *phydrv = to_phy_driver(drv); int err = 0; @@ -1382,17 +1621,20 @@ static int phy_remove(struct device *dev) /** * phy_driver_register - register a phy_driver with the PHY layer * @new_driver: new phy_driver to register + * @owner: module owning this PHY */ -int phy_driver_register(struct phy_driver *new_driver) +int phy_driver_register(struct phy_driver *new_driver, struct module *owner) { int retval; - new_driver->driver.name = new_driver->name; - new_driver->driver.bus = &mdio_bus_type; - new_driver->driver.probe = phy_probe; - new_driver->driver.remove = phy_remove; + new_driver->mdiodrv.flags |= MDIO_DEVICE_IS_PHY; + new_driver->mdiodrv.driver.name = new_driver->name; + new_driver->mdiodrv.driver.bus = &mdio_bus_type; + new_driver->mdiodrv.driver.probe = phy_probe; + new_driver->mdiodrv.driver.remove = phy_remove; + new_driver->mdiodrv.driver.owner = owner; - retval = driver_register(&new_driver->driver); + retval = driver_register(&new_driver->mdiodrv.driver); if (retval) { pr_err("%s: Error %d in registering driver\n", new_driver->name, retval); @@ -1406,12 +1648,13 @@ int phy_driver_register(struct phy_driver *new_driver) } EXPORT_SYMBOL(phy_driver_register); -int phy_drivers_register(struct phy_driver *new_driver, int n) +int phy_drivers_register(struct phy_driver *new_driver, int n, + struct module *owner) { int i, ret = 0; for (i = 0; i < n; i++) { - ret = phy_driver_register(new_driver + i); + ret = phy_driver_register(new_driver + i, owner); if (ret) { while (i-- > 0) phy_driver_unregister(new_driver + i); @@ -1424,7 +1667,7 @@ EXPORT_SYMBOL(phy_drivers_register); void phy_driver_unregister(struct phy_driver *drv) { - driver_unregister(&drv->driver); + driver_unregister(&drv->mdiodrv.driver); } EXPORT_SYMBOL(phy_driver_unregister); @@ -1452,7 +1695,6 @@ static struct phy_driver genphy_driver[] = { .read_status = genphy_read_status, .suspend = genphy_suspend, .resume = genphy_resume, - .driver = { .owner = THIS_MODULE, }, }, { .phy_id = 0xffffffff, .phy_id_mask = 0xffffffff, @@ -1464,7 +1706,6 @@ static struct phy_driver genphy_driver[] = { .read_status = gen10g_read_status, .suspend = gen10g_suspend, .resume = gen10g_resume, - .driver = {.owner = THIS_MODULE, }, } }; static int __init phy_init(void) @@ -1476,7 +1717,7 @@ static int __init phy_init(void) return 
rc; rc = phy_drivers_register(genphy_driver, - ARRAY_SIZE(genphy_driver)); + ARRAY_SIZE(genphy_driver), THIS_MODULE); if (rc) mdio_bus_exit(); diff --git a/drivers/net/phy/qsemi.c b/drivers/net/phy/qsemi.c index be4c6f7c3645..d470db89e8dd 100644 --- a/drivers/net/phy/qsemi.c +++ b/drivers/net/phy/qsemi.c @@ -122,7 +122,6 @@ static struct phy_driver qs6612_driver[] = { { .read_status = genphy_read_status, .ack_interrupt = qs6612_ack_interrupt, .config_intr = qs6612_config_intr, - .driver = { .owner = THIS_MODULE,}, } }; module_phy_driver(qs6612_driver); diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c index 43ab691362d4..aadd6e9f54ad 100644 --- a/drivers/net/phy/realtek.c +++ b/drivers/net/phy/realtek.c @@ -124,7 +124,6 @@ static struct phy_driver realtek_drvs[] = { .flags = PHY_HAS_INTERRUPT, .config_aneg = &genphy_config_aneg, .read_status = &genphy_read_status, - .driver = { .owner = THIS_MODULE,}, }, { .phy_id = 0x001cc912, .name = "RTL8211B Gigabit Ethernet", @@ -135,7 +134,6 @@ static struct phy_driver realtek_drvs[] = { .read_status = &genphy_read_status, .ack_interrupt = &rtl821x_ack_interrupt, .config_intr = &rtl8211b_config_intr, - .driver = { .owner = THIS_MODULE,}, }, { .phy_id = 0x001cc914, .name = "RTL8211DN Gigabit Ethernet", @@ -148,7 +146,6 @@ static struct phy_driver realtek_drvs[] = { .config_intr = rtl8211e_config_intr, .suspend = genphy_suspend, .resume = genphy_resume, - .driver = { .owner = THIS_MODULE,}, }, { .phy_id = 0x001cc915, .name = "RTL8211E Gigabit Ethernet", @@ -161,7 +158,6 @@ static struct phy_driver realtek_drvs[] = { .config_intr = &rtl8211e_config_intr, .suspend = genphy_suspend, .resume = genphy_resume, - .driver = { .owner = THIS_MODULE,}, }, { .phy_id = 0x001cc916, .name = "RTL8211F Gigabit Ethernet", @@ -175,7 +171,6 @@ static struct phy_driver realtek_drvs[] = { .config_intr = &rtl8211f_config_intr, .suspend = genphy_suspend, .resume = genphy_resume, - .driver = { .owner = THIS_MODULE }, }, }; diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c index dc2da8770918..2e21e9366f76 100644 --- a/drivers/net/phy/smsc.c +++ b/drivers/net/phy/smsc.c @@ -24,6 +24,10 @@ #include <linux/netdevice.h> #include <linux/smscphy.h> +struct smsc_phy_priv { + bool energy_enable; +}; + static int smsc_phy_config_intr(struct phy_device *phydev) { int rc = phy_write (phydev, MII_LAN83C185_IM, @@ -43,19 +47,14 @@ static int smsc_phy_ack_interrupt(struct phy_device *phydev) static int smsc_phy_config_init(struct phy_device *phydev) { - int __maybe_unused len; - struct device *dev __maybe_unused = &phydev->dev; - struct device_node *of_node __maybe_unused = dev->of_node; + struct smsc_phy_priv *priv = phydev->priv; + int rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS); - int enable_energy = 1; if (rc < 0) return rc; - if (of_find_property(of_node, "smsc,disable-energy-detect", &len)) - enable_energy = 0; - - if (enable_energy) { + if (priv->energy_enable) { /* Enable energy detect mode for this SMSC Transceivers */ rc = phy_write(phydev, MII_LAN83C185_CTRL_STATUS, rc | MII_LAN83C185_EDPWRDOWN); @@ -110,10 +109,13 @@ static int lan911x_config_init(struct phy_device *phydev) */ static int lan87xx_read_status(struct phy_device *phydev) { + struct smsc_phy_priv *priv = phydev->priv; + int err = genphy_read_status(phydev); - int i; - if (!phydev->link) { + if (!phydev->link && priv->energy_enable) { + int i; + /* Disable EDPD to wake up PHY */ int rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS); if (rc < 0) @@ -149,6 +151,26 @@ static int 
lan87xx_read_status(struct phy_device *phydev) return err; } +static int smsc_phy_probe(struct phy_device *phydev) +{ + struct device *dev = &phydev->mdio.dev; + struct device_node *of_node = dev->of_node; + struct smsc_phy_priv *priv; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->energy_enable = true; + + if (of_property_read_bool(of_node, "smsc,disable-energy-detect")) + priv->energy_enable = false; + + phydev->priv = priv; + + return 0; +} + static struct phy_driver smsc_phy_driver[] = { { .phy_id = 0x0007c0a0, /* OUI=0x00800f, Model#=0x0a */ @@ -159,6 +181,8 @@ static struct phy_driver smsc_phy_driver[] = { | SUPPORTED_Asym_Pause), .flags = PHY_HAS_INTERRUPT | PHY_HAS_MAGICANEG, + .probe = smsc_phy_probe, + /* basic functions */ .config_aneg = genphy_config_aneg, .read_status = genphy_read_status, @@ -171,8 +195,6 @@ static struct phy_driver smsc_phy_driver[] = { .suspend = genphy_suspend, .resume = genphy_resume, - - .driver = { .owner = THIS_MODULE, } }, { .phy_id = 0x0007c0b0, /* OUI=0x00800f, Model#=0x0b */ .phy_id_mask = 0xfffffff0, @@ -182,6 +204,8 @@ static struct phy_driver smsc_phy_driver[] = { | SUPPORTED_Asym_Pause), .flags = PHY_HAS_INTERRUPT | PHY_HAS_MAGICANEG, + .probe = smsc_phy_probe, + /* basic functions */ .config_aneg = genphy_config_aneg, .read_status = genphy_read_status, @@ -194,8 +218,6 @@ static struct phy_driver smsc_phy_driver[] = { .suspend = genphy_suspend, .resume = genphy_resume, - - .driver = { .owner = THIS_MODULE, } }, { .phy_id = 0x0007c0c0, /* OUI=0x00800f, Model#=0x0c */ .phy_id_mask = 0xfffffff0, @@ -205,6 +227,8 @@ static struct phy_driver smsc_phy_driver[] = { | SUPPORTED_Asym_Pause), .flags = PHY_HAS_INTERRUPT | PHY_HAS_MAGICANEG, + .probe = smsc_phy_probe, + /* basic functions */ .config_aneg = genphy_config_aneg, .read_status = lan87xx_read_status, @@ -217,8 +241,6 @@ static struct phy_driver smsc_phy_driver[] = { .suspend = genphy_suspend, .resume = genphy_resume, - - .driver = { .owner = THIS_MODULE, } }, { .phy_id = 0x0007c0d0, /* OUI=0x00800f, Model#=0x0d */ .phy_id_mask = 0xfffffff0, @@ -228,6 +250,8 @@ static struct phy_driver smsc_phy_driver[] = { | SUPPORTED_Asym_Pause), .flags = PHY_HAS_INTERRUPT | PHY_HAS_MAGICANEG, + .probe = smsc_phy_probe, + /* basic functions */ .config_aneg = genphy_config_aneg, .read_status = genphy_read_status, @@ -239,8 +263,6 @@ static struct phy_driver smsc_phy_driver[] = { .suspend = genphy_suspend, .resume = genphy_resume, - - .driver = { .owner = THIS_MODULE, } }, { .phy_id = 0x0007c0f0, /* OUI=0x00800f, Model#=0x0f */ .phy_id_mask = 0xfffffff0, @@ -250,6 +272,8 @@ static struct phy_driver smsc_phy_driver[] = { | SUPPORTED_Asym_Pause), .flags = PHY_HAS_INTERRUPT | PHY_HAS_MAGICANEG, + .probe = smsc_phy_probe, + /* basic functions */ .config_aneg = genphy_config_aneg, .read_status = lan87xx_read_status, @@ -262,8 +286,29 @@ static struct phy_driver smsc_phy_driver[] = { .suspend = genphy_suspend, .resume = genphy_resume, +}, { + .phy_id = 0x0007c110, + .phy_id_mask = 0xfffffff0, + .name = "SMSC LAN8740", - .driver = { .owner = THIS_MODULE, } + .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause + | SUPPORTED_Asym_Pause), + .flags = PHY_HAS_INTERRUPT | PHY_HAS_MAGICANEG, + + .probe = smsc_phy_probe, + + /* basic functions */ + .config_aneg = genphy_config_aneg, + .read_status = lan87xx_read_status, + .config_init = smsc_phy_config_init, + .soft_reset = smsc_phy_reset, + + /* IRQ related */ + .ack_interrupt = smsc_phy_ack_interrupt, + .config_intr = 
smsc_phy_config_intr, + + .suspend = genphy_suspend, + .resume = genphy_resume, } }; module_phy_driver(smsc_phy_driver); @@ -278,6 +323,7 @@ static struct mdio_device_id __maybe_unused smsc_tbl[] = { { 0x0007c0c0, 0xfffffff0 }, { 0x0007c0d0, 0xfffffff0 }, { 0x0007c0f0, 0xfffffff0 }, + { 0x0007c110, 0xfffffff0 }, { } }; diff --git a/drivers/net/phy/spi_ks8995.c b/drivers/net/phy/spi_ks8995.c index c72c42206850..b5d50d458728 100644 --- a/drivers/net/phy/spi_ks8995.c +++ b/drivers/net/phy/spi_ks8995.c @@ -18,6 +18,9 @@ #include <linux/module.h> #include <linux/delay.h> #include <linux/device.h> +#include <linux/gpio/consumer.h> +#include <linux/of.h> +#include <linux/of_gpio.h> #include <linux/spi/spi.h> @@ -74,6 +77,7 @@ #define KS8995_REGS_SIZE 0x80 #define KSZ8864_REGS_SIZE 0x100 +#define KSZ8795_REGS_SIZE 0x100 #define ID1_CHIPID_M 0xf #define ID1_CHIPID_S 4 @@ -82,15 +86,63 @@ #define ID1_START_SW 1 /* start the switch */ #define FAMILY_KS8995 0x95 +#define FAMILY_KSZ8795 0x87 #define CHIPID_M 0 +#define KS8995_CHIP_ID 0x00 +#define KSZ8864_CHIP_ID 0x01 +#define KSZ8795_CHIP_ID 0x09 #define KS8995_CMD_WRITE 0x02U #define KS8995_CMD_READ 0x03U #define KS8995_RESET_DELAY 10 /* usec */ +enum ks8995_chip_variant { + ks8995, + ksz8864, + ksz8795, + max_variant +}; + +struct ks8995_chip_params { + char *name; + int family_id; + int chip_id; + int regs_size; + int addr_width; + int addr_shift; +}; + +static const struct ks8995_chip_params ks8995_chip[] = { + [ks8995] = { + .name = "KS8995MA", + .family_id = FAMILY_KS8995, + .chip_id = KS8995_CHIP_ID, + .regs_size = KS8995_REGS_SIZE, + .addr_width = 8, + .addr_shift = 0, + }, + [ksz8864] = { + .name = "KSZ8864RMN", + .family_id = FAMILY_KS8995, + .chip_id = KSZ8864_CHIP_ID, + .regs_size = KSZ8864_REGS_SIZE, + .addr_width = 8, + .addr_shift = 0, + }, + [ksz8795] = { + .name = "KSZ8795CLX", + .family_id = FAMILY_KSZ8795, + .chip_id = KSZ8795_CHIP_ID, + .regs_size = KSZ8795_REGS_SIZE, + .addr_width = 12, + .addr_shift = 1, + }, +}; + struct ks8995_pdata { - /* not yet implemented */ + int reset_gpio; + enum of_gpio_flags reset_gpio_flags; }; struct ks8995_switch { @@ -98,7 +150,17 @@ struct ks8995_switch { struct mutex lock; struct ks8995_pdata *pdata; struct bin_attribute regs_attr; + const struct ks8995_chip_params *chip; + int revision_id; +}; + +static const struct spi_device_id ks8995_id[] = { + {"ks8995", ks8995}, + {"ksz8864", ksz8864}, + {"ksz8795", ksz8795}, + { } }; +MODULE_DEVICE_TABLE(spi, ks8995_id); static inline u8 get_chip_id(u8 val) { @@ -110,20 +172,44 @@ static inline u8 get_chip_rev(u8 val) return (val >> ID1_REVISION_S) & ID1_REVISION_M; } +/* create_spi_cmd - create a chip specific SPI command header + * @ks: pointer to switch instance + * @cmd: SPI command for switch + * @address: register address for command + * + * Different chip families use different bit pattern to address the switches + * registers: + * + * KS8995: 8bit command + 8bit address + * KSZ8795: 3bit command + 12bit address + 1bit TR (?) + */ +static inline __be16 create_spi_cmd(struct ks8995_switch *ks, int cmd, + unsigned address) +{ + u16 result = cmd; + + /* make room for address (incl. 
address shift) */ + result <<= ks->chip->addr_width + ks->chip->addr_shift; + /* add address */ + result |= address << ks->chip->addr_shift; + /* SPI protocol needs big endian */ + return cpu_to_be16(result); +} /* ------------------------------------------------------------------------ */ static int ks8995_read(struct ks8995_switch *ks, char *buf, unsigned offset, size_t count) { - u8 cmd[2]; + __be16 cmd; struct spi_transfer t[2]; struct spi_message m; int err; + cmd = create_spi_cmd(ks, KS8995_CMD_READ, offset); spi_message_init(&m); memset(&t, 0, sizeof(t)); - t[0].tx_buf = cmd; + t[0].tx_buf = &cmd; t[0].len = sizeof(cmd); spi_message_add_tail(&t[0], &m); @@ -131,9 +217,6 @@ static int ks8995_read(struct ks8995_switch *ks, char *buf, t[1].len = count; spi_message_add_tail(&t[1], &m); - cmd[0] = KS8995_CMD_READ; - cmd[1] = offset; - mutex_lock(&ks->lock); err = spi_sync(ks->spi, &m); mutex_unlock(&ks->lock); @@ -141,20 +224,20 @@ static int ks8995_read(struct ks8995_switch *ks, char *buf, return err ? err : count; } - static int ks8995_write(struct ks8995_switch *ks, char *buf, unsigned offset, size_t count) { - u8 cmd[2]; + __be16 cmd; struct spi_transfer t[2]; struct spi_message m; int err; + cmd = create_spi_cmd(ks, KS8995_CMD_WRITE, offset); spi_message_init(&m); memset(&t, 0, sizeof(t)); - t[0].tx_buf = cmd; + t[0].tx_buf = &cmd; t[0].len = sizeof(cmd); spi_message_add_tail(&t[0], &m); @@ -162,9 +245,6 @@ static int ks8995_write(struct ks8995_switch *ks, char *buf, t[1].len = count; spi_message_add_tail(&t[1], &m); - cmd[0] = KS8995_CMD_WRITE; - cmd[1] = offset; - mutex_lock(&ks->lock); err = spi_sync(ks->spi, &m); mutex_unlock(&ks->lock); @@ -233,6 +313,107 @@ static ssize_t ks8995_registers_write(struct file *filp, struct kobject *kobj, return ks8995_write(ks8995, buf, off, count); } +/* ks8995_get_revision - get chip revision + * @ks: pointer to switch instance + * + * Verify chip family and id and get chip revision. 
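A quick standalone check of the create_spi_cmd() packing above, using the addr_width/addr_shift values from the ks8995_chip table and the KS8995_CMD_READ value (0x03) defined earlier; the cpu_to_be16() byte swap for the wire is left out:

#include <stdio.h>
#include <stdint.h>

/* Pack a 16-bit command word: command above the address field, address
 * shifted up by addr_shift (one spare low bit on the KSZ8795). */
static uint16_t spi_cmd(uint16_t cmd, unsigned int addr,
			int addr_width, int addr_shift)
{
	uint16_t word = cmd;

	word <<= addr_width + addr_shift; /* make room for the address */
	word |= addr << addr_shift;       /* insert the register address */
	return word;                      /* caller byte-swaps for the wire */
}

int main(void)
{
	/* KS8995MA read of register 0x01: 8-bit cmd + 8-bit address */
	printf("0x%04x\n", (unsigned)spi_cmd(0x03, 0x01, 8, 0));  /* 0x0301 */
	/* KSZ8795CLX read of register 0x01: 3-bit cmd + 12-bit address + TR */
	printf("0x%04x\n", (unsigned)spi_cmd(0x03, 0x01, 12, 1)); /* 0x6002 */
	return 0;
}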
+ */ +static int ks8995_get_revision(struct ks8995_switch *ks) +{ + int err; + u8 id0, id1, ksz8864_id; + + /* read family id */ + err = ks8995_read_reg(ks, KS8995_REG_ID0, &id0); + if (err) { + err = -EIO; + goto err_out; + } + + /* verify family id */ + if (id0 != ks->chip->family_id) { + dev_err(&ks->spi->dev, "chip family id mismatch: expected 0x%02x but 0x%02x read\n", + ks->chip->family_id, id0); + err = -ENODEV; + goto err_out; + } + + switch (ks->chip->family_id) { + case FAMILY_KS8995: + /* try reading chip id at CHIP ID1 */ + err = ks8995_read_reg(ks, KS8995_REG_ID1, &id1); + if (err) { + err = -EIO; + goto err_out; + } + + /* verify chip id */ + if ((get_chip_id(id1) == CHIPID_M) && + (get_chip_id(id1) == ks->chip->chip_id)) { + /* KS8995MA */ + ks->revision_id = get_chip_rev(id1); + } else if (get_chip_id(id1) != CHIPID_M) { + /* KSZ8864RMN */ + err = ks8995_read_reg(ks, KS8995_REG_ID1, &ksz8864_id); + if (err) { + err = -EIO; + goto err_out; + } + + if ((ksz8864_id & 0x80) && + (ks->chip->chip_id == KSZ8864_CHIP_ID)) { + ks->revision_id = get_chip_rev(id1); + } + + } else { + dev_err(&ks->spi->dev, "unsupported chip id for KS8995 family: 0x%02x\n", + id1); + err = -ENODEV; + } + break; + case FAMILY_KSZ8795: + /* try reading chip id at CHIP ID1 */ + err = ks8995_read_reg(ks, KS8995_REG_ID1, &id1); + if (err) { + err = -EIO; + goto err_out; + } + + if (get_chip_id(id1) == ks->chip->chip_id) { + ks->revision_id = get_chip_rev(id1); + } else { + dev_err(&ks->spi->dev, "unsupported chip id for KSZ8795 family: 0x%02x\n", + id1); + err = -ENODEV; + } + break; + default: + dev_err(&ks->spi->dev, "unsupported family id: 0x%02x\n", id0); + err = -ENODEV; + break; + } +err_out: + return err; +} + +/* ks8995_parse_dt - setup platform data from devicetree + * @ks: pointer to switch instance + * + * Parses supported DT properties and sets up platform data + * accordingly. + */ +static void ks8995_parse_dt(struct ks8995_switch *ks) +{ + struct device_node *np = ks->spi->dev.of_node; + struct ks8995_pdata *pdata = ks->pdata; + + if (!np) + return; + + pdata->reset_gpio = of_get_named_gpio_flags(np, "reset-gpios", 0, + &pdata->reset_gpio_flags); +} + static const struct bin_attribute ks8995_registers_attr = { .attr = { .name = "registers", @@ -244,24 +425,58 @@ static const struct bin_attribute ks8995_registers_attr = { }; /* ------------------------------------------------------------------------ */ - static int ks8995_probe(struct spi_device *spi) { - struct ks8995_switch *ks; - struct ks8995_pdata *pdata; - u8 ids[2]; - int err; + struct ks8995_switch *ks; + int err; + int variant = spi_get_device_id(spi)->driver_data; - /* Chip description */ - pdata = spi->dev.platform_data; + if (variant >= max_variant) { + dev_err(&spi->dev, "bad chip variant %d\n", variant); + return -ENODEV; + } ks = devm_kzalloc(&spi->dev, sizeof(*ks), GFP_KERNEL); if (!ks) return -ENOMEM; mutex_init(&ks->lock); - ks->pdata = pdata; ks->spi = spi_dev_get(spi); + ks->chip = &ks8995_chip[variant]; + + if (ks->spi->dev.of_node) { + ks->pdata = devm_kzalloc(&spi->dev, sizeof(*ks->pdata), + GFP_KERNEL); + if (!ks->pdata) + return -ENOMEM; + + ks->pdata->reset_gpio = -1; + + ks8995_parse_dt(ks); + } + + if (!ks->pdata) + ks->pdata = spi->dev.platform_data; + + /* de-assert switch reset */ + if (ks->pdata && gpio_is_valid(ks->pdata->reset_gpio)) { + unsigned long flags; + + flags = (ks->pdata->reset_gpio_flags == OF_GPIO_ACTIVE_LOW ? 
+ GPIOF_ACTIVE_LOW : 0); + + err = devm_gpio_request_one(&spi->dev, + ks->pdata->reset_gpio, + flags, "switch-reset"); + if (err) { + dev_err(&spi->dev, + "failed to get reset-gpios: %d\n", err); + return -EIO; + } + + gpiod_set_value(gpio_to_desc(ks->pdata->reset_gpio), 0); + } + spi_set_drvdata(spi, ks); spi->mode = SPI_MODE_0; @@ -272,39 +487,12 @@ static int ks8995_probe(struct spi_device *spi) return err; } - err = ks8995_read(ks, ids, KS8995_REG_ID0, sizeof(ids)); - if (err < 0) { - dev_err(&spi->dev, "unable to read id registers, err=%d\n", - err); + err = ks8995_get_revision(ks); + if (err) return err; - } - - switch (ids[0]) { - case FAMILY_KS8995: - break; - default: - dev_err(&spi->dev, "unknown family id:%02x\n", ids[0]); - return -ENODEV; - } + ks->regs_attr.size = ks->chip->regs_size; memcpy(&ks->regs_attr, &ks8995_registers_attr, sizeof(ks->regs_attr)); - if (get_chip_id(ids[1]) != CHIPID_M) { - u8 val; - - /* Check if this is a KSZ8864RMN */ - err = ks8995_read(ks, &val, KSZ8864_REG_ID1, sizeof(val)); - if (err < 0) { - dev_err(&spi->dev, - "unable to read chip id register, err=%d\n", - err); - return err; - } - if ((val & 0x80) == 0) { - dev_err(&spi->dev, "unknown chip:%02x,0\n", ids[1]); - return err; - } - ks->regs_attr.size = KSZ8864_REGS_SIZE; - } err = ks8995_reset(ks); if (err) @@ -317,14 +505,8 @@ static int ks8995_probe(struct spi_device *spi) return err; } - if (get_chip_id(ids[1]) == CHIPID_M) { - dev_info(&spi->dev, - "KS8995 device found, Chip ID:%x, Revision:%x\n", - get_chip_id(ids[1]), get_chip_rev(ids[1])); - } else { - dev_info(&spi->dev, "KSZ8864 device found, Revision:%x\n", - get_chip_rev(ids[1])); - } + dev_info(&spi->dev, "%s device found, Chip ID:%x, Revision:%x\n", + ks->chip->name, ks->chip->chip_id, ks->revision_id); return 0; } @@ -335,17 +517,21 @@ static int ks8995_remove(struct spi_device *spi) sysfs_remove_bin_file(&spi->dev.kobj, &ks->regs_attr); + /* assert reset */ + if (ks->pdata && gpio_is_valid(ks->pdata->reset_gpio)) + gpiod_set_value(gpio_to_desc(ks->pdata->reset_gpio), 1); + return 0; } /* ------------------------------------------------------------------------ */ - static struct spi_driver ks8995_driver = { .driver = { .name = "spi-ks8995", }, .probe = ks8995_probe, .remove = ks8995_remove, + .id_table = ks8995_id, }; module_spi_driver(ks8995_driver); diff --git a/drivers/net/phy/ste10Xp.c b/drivers/net/phy/ste10Xp.c index 3fc199b773e6..d00cfb64529e 100644 --- a/drivers/net/phy/ste10Xp.c +++ b/drivers/net/phy/ste10Xp.c @@ -95,7 +95,6 @@ static struct phy_driver ste10xp_pdriver[] = { .config_intr = ste10Xp_config_intr, .suspend = genphy_suspend, .resume = genphy_resume, - .driver = {.owner = THIS_MODULE,} }, { .phy_id = STE100P_PHY_ID, .phy_id_mask = 0xffffffff, @@ -109,7 +108,6 @@ static struct phy_driver ste10xp_pdriver[] = { .config_intr = ste10Xp_config_intr, .suspend = genphy_suspend, .resume = genphy_resume, - .driver = {.owner = THIS_MODULE,} } }; module_phy_driver(ste10xp_pdriver); diff --git a/drivers/net/phy/teranetics.c b/drivers/net/phy/teranetics.c index 07463fcca212..fb2cef764e9a 100644 --- a/drivers/net/phy/teranetics.c +++ b/drivers/net/phy/teranetics.c @@ -108,7 +108,6 @@ static struct phy_driver teranetics_driver[] = { .config_aneg = teranetics_config_aneg, .read_status = teranetics_read_status, .match_phy_device = teranetics_match_phy_device, - .driver = { .owner = THIS_MODULE,}, }, }; diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c index dd295dbaa074..2e37eb337d48 100644 --- 
a/drivers/net/phy/vitesse.c +++ b/drivers/net/phy/vitesse.c @@ -236,7 +236,6 @@ static struct phy_driver vsc82xx_driver[] = { .read_status = &genphy_read_status, .ack_interrupt = &vsc824x_ack_interrupt, .config_intr = &vsc82xx_config_intr, - .driver = { .owner = THIS_MODULE,}, }, { .phy_id = PHY_ID_VSC8244, .name = "Vitesse VSC8244", @@ -248,7 +247,6 @@ static struct phy_driver vsc82xx_driver[] = { .read_status = &genphy_read_status, .ack_interrupt = &vsc824x_ack_interrupt, .config_intr = &vsc82xx_config_intr, - .driver = { .owner = THIS_MODULE,}, }, { .phy_id = PHY_ID_VSC8514, .name = "Vitesse VSC8514", @@ -260,7 +258,6 @@ static struct phy_driver vsc82xx_driver[] = { .read_status = &genphy_read_status, .ack_interrupt = &vsc824x_ack_interrupt, .config_intr = &vsc82xx_config_intr, - .driver = { .owner = THIS_MODULE,}, }, { .phy_id = PHY_ID_VSC8574, .name = "Vitesse VSC8574", @@ -272,7 +269,6 @@ static struct phy_driver vsc82xx_driver[] = { .read_status = &genphy_read_status, .ack_interrupt = &vsc824x_ack_interrupt, .config_intr = &vsc82xx_config_intr, - .driver = { .owner = THIS_MODULE,}, }, { .phy_id = PHY_ID_VSC8601, .name = "Vitesse VSC8601", @@ -284,7 +280,6 @@ static struct phy_driver vsc82xx_driver[] = { .read_status = &genphy_read_status, .ack_interrupt = &vsc824x_ack_interrupt, .config_intr = &vsc82xx_config_intr, - .driver = { .owner = THIS_MODULE,}, }, { .phy_id = PHY_ID_VSC8662, .name = "Vitesse VSC8662", @@ -296,7 +291,6 @@ static struct phy_driver vsc82xx_driver[] = { .read_status = &genphy_read_status, .ack_interrupt = &vsc824x_ack_interrupt, .config_intr = &vsc82xx_config_intr, - .driver = { .owner = THIS_MODULE,}, }, { /* Vitesse 8221 */ .phy_id = PHY_ID_VSC8221, @@ -309,7 +303,6 @@ static struct phy_driver vsc82xx_driver[] = { .read_status = &genphy_read_status, .ack_interrupt = &vsc824x_ack_interrupt, .config_intr = &vsc82xx_config_intr, - .driver = { .owner = THIS_MODULE,}, }, { /* Vitesse 8211 */ .phy_id = PHY_ID_VSC8211, @@ -322,7 +315,6 @@ static struct phy_driver vsc82xx_driver[] = { .read_status = &genphy_read_status, .ack_interrupt = &vsc824x_ack_interrupt, .config_intr = &vsc82xx_config_intr, - .driver = { .owner = THIS_MODULE,}, } }; module_phy_driver(vsc82xx_driver); diff --git a/drivers/net/plip/plip.c b/drivers/net/plip/plip.c index 040b8978d6ca..9c4b41a4df7d 100644 --- a/drivers/net/plip/plip.c +++ b/drivers/net/plip/plip.c @@ -1249,6 +1249,7 @@ static void plip_attach (struct parport *port) struct net_device *dev; struct net_local *nl; char name[IFNAMSIZ]; + struct pardev_cb plip_cb; if ((parport[0] == -1 && (!timid || !port->devices)) || plip_searchfor(parport, port->number)) { @@ -1273,9 +1274,15 @@ static void plip_attach (struct parport *port) nl = netdev_priv(dev); nl->dev = dev; - nl->pardev = parport_register_device(port, dev->name, plip_preempt, - plip_wakeup, plip_interrupt, - 0, dev); + + memset(&plip_cb, 0, sizeof(plip_cb)); + plip_cb.private = dev; + plip_cb.preempt = plip_preempt; + plip_cb.wakeup = plip_wakeup; + plip_cb.irq_func = plip_interrupt; + + nl->pardev = parport_register_dev_model(port, dev->name, + &plip_cb, unit); if (!nl->pardev) { printk(KERN_ERR "%s: parport_register failed\n", name); @@ -1315,10 +1322,23 @@ static void plip_detach (struct parport *port) /* Nothing to do */ } +static int plip_probe(struct pardevice *par_dev) +{ + struct device_driver *drv = par_dev->dev.driver; + int len = strlen(drv->name); + + if (strncmp(par_dev->name, drv->name, len)) + return -ENODEV; + + return 0; +} + static struct parport_driver 
plip_driver = { - .name = "plip", - .attach = plip_attach, - .detach = plip_detach + .name = "plip", + .probe = plip_probe, + .match_port = plip_attach, + .detach = plip_detach, + .devmodel = true, }; static void __exit plip_cleanup_module (void) @@ -1326,8 +1346,6 @@ static void __exit plip_cleanup_module (void) struct net_device *dev; int i; - parport_unregister_driver (&plip_driver); - for (i=0; i < PLIP_MAX; i++) { if ((dev = dev_plip[i])) { struct net_local *nl = netdev_priv(dev); @@ -1339,6 +1357,8 @@ static void __exit plip_cleanup_module (void) dev_plip[i] = NULL; } } + + parport_unregister_driver(&plip_driver); } #ifndef MODULE diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c index 90868ca5e341..ae0905ed4a32 100644 --- a/drivers/net/ppp/pptp.c +++ b/drivers/net/ppp/pptp.c @@ -129,24 +129,27 @@ static int lookup_chan_dst(u16 call_id, __be32 d_addr) return i < MAX_CALLID; } -static int add_chan(struct pppox_sock *sock) +static int add_chan(struct pppox_sock *sock, + struct pptp_addr *sa) { static int call_id; spin_lock(&chan_lock); - if (!sock->proto.pptp.src_addr.call_id) { + if (!sa->call_id) { call_id = find_next_zero_bit(callid_bitmap, MAX_CALLID, call_id + 1); if (call_id == MAX_CALLID) { call_id = find_next_zero_bit(callid_bitmap, MAX_CALLID, 1); if (call_id == MAX_CALLID) goto out_err; } - sock->proto.pptp.src_addr.call_id = call_id; - } else if (test_bit(sock->proto.pptp.src_addr.call_id, callid_bitmap)) + sa->call_id = call_id; + } else if (test_bit(sa->call_id, callid_bitmap)) { goto out_err; + } - set_bit(sock->proto.pptp.src_addr.call_id, callid_bitmap); - rcu_assign_pointer(callid_sock[sock->proto.pptp.src_addr.call_id], sock); + sock->proto.pptp.src_addr = *sa; + set_bit(sa->call_id, callid_bitmap); + rcu_assign_pointer(callid_sock[sa->call_id], sock); spin_unlock(&chan_lock); return 0; @@ -416,7 +419,6 @@ static int pptp_bind(struct socket *sock, struct sockaddr *uservaddr, struct sock *sk = sock->sk; struct sockaddr_pppox *sp = (struct sockaddr_pppox *) uservaddr; struct pppox_sock *po = pppox_sk(sk); - struct pptp_opt *opt = &po->proto.pptp; int error = 0; if (sockaddr_len < sizeof(struct sockaddr_pppox)) @@ -424,10 +426,22 @@ static int pptp_bind(struct socket *sock, struct sockaddr *uservaddr, lock_sock(sk); - opt->src_addr = sp->sa_addr.pptp; - if (add_chan(po)) + if (sk->sk_state & PPPOX_DEAD) { + error = -EALREADY; + goto out; + } + + if (sk->sk_state & PPPOX_BOUND) { error = -EBUSY; + goto out; + } + + if (add_chan(po, &sp->sa_addr.pptp)) + error = -EBUSY; + else + sk->sk_state |= PPPOX_BOUND; +out: release_sock(sk); return error; } @@ -498,7 +512,7 @@ static int pptp_connect(struct socket *sock, struct sockaddr *uservaddr, } opt->dst_addr = sp->sa_addr.pptp; - sk->sk_state = PPPOX_CONNECTED; + sk->sk_state |= PPPOX_CONNECTED; end: release_sock(sk); diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index 2528331de193..00558e139584 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -758,6 +758,8 @@ static rx_handler_result_t team_handle_frame(struct sk_buff **pskb) u64_stats_update_end(&pcpu_stats->syncp); skb->dev = team->dev; + } else if (res == RX_HANDLER_EXACT) { + this_cpu_inc(team->pcpu_stats->rx_nohandler); } else { this_cpu_inc(team->pcpu_stats->rx_dropped); } @@ -1807,7 +1809,7 @@ team_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) struct team *team = netdev_priv(dev); struct team_pcpu_stats *p; u64 rx_packets, rx_bytes, rx_multicast, tx_packets, tx_bytes; - u32 rx_dropped = 0, 
tx_dropped = 0; + u32 rx_dropped = 0, tx_dropped = 0, rx_nohandler = 0; unsigned int start; int i; @@ -1828,14 +1830,16 @@ team_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) stats->tx_packets += tx_packets; stats->tx_bytes += tx_bytes; /* - * rx_dropped & tx_dropped are u32, updated - * without syncp protection. + * rx_dropped, tx_dropped & rx_nohandler are u32, + * updated without syncp protection. */ rx_dropped += p->rx_dropped; tx_dropped += p->tx_dropped; + rx_nohandler += p->rx_nohandler; } stats->rx_dropped = rx_dropped; stats->tx_dropped = tx_dropped; + stats->rx_nohandler = rx_nohandler; return stats; } @@ -1872,10 +1876,10 @@ static int team_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid) struct team *team = netdev_priv(dev); struct team_port *port; - rcu_read_lock(); - list_for_each_entry_rcu(port, &team->port_list, list) + mutex_lock(&team->lock); + list_for_each_entry(port, &team->port_list, list) vlan_vid_del(port->dev, proto, vid); - rcu_read_unlock(); + mutex_unlock(&team->lock); return 0; } diff --git a/drivers/net/usb/ax88172a.c b/drivers/net/usb/ax88172a.c index 5f18fcb8dcc7..224e7d82de6d 100644 --- a/drivers/net/usb/ax88172a.c +++ b/drivers/net/usb/ax88172a.c @@ -98,7 +98,7 @@ static void ax88172a_status(struct usbnet *dev, struct urb *urb) static int ax88172a_init_mdio(struct usbnet *dev) { struct ax88172a_private *priv = dev->driver_priv; - int ret, i; + int ret; priv->mdio = mdiobus_alloc(); if (!priv->mdio) { @@ -114,25 +114,15 @@ static int ax88172a_init_mdio(struct usbnet *dev) snprintf(priv->mdio->id, MII_BUS_ID_SIZE, "usb-%03d:%03d", dev->udev->bus->busnum, dev->udev->devnum); - priv->mdio->irq = kzalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); - if (!priv->mdio->irq) { - ret = -ENOMEM; - goto mfree; - } - for (i = 0; i < PHY_MAX_ADDR; i++) - priv->mdio->irq[i] = PHY_POLL; - ret = mdiobus_register(priv->mdio); if (ret) { netdev_err(dev->net, "Could not register MDIO bus\n"); - goto ifree; + goto mfree; } netdev_info(dev->net, "registered mdio bus %s\n", priv->mdio->id); return 0; -ifree: - kfree(priv->mdio->irq); mfree: mdiobus_free(priv->mdio); return ret; diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c index 3da70bf9936a..7cba2c3759df 100644 --- a/drivers/net/usb/cdc_ether.c +++ b/drivers/net/usb/cdc_ether.c @@ -160,6 +160,12 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf) info->u = header.usb_cdc_union_desc; info->header = header.usb_cdc_header_desc; info->ether = header.usb_cdc_ether_desc; + if (!info->u) { + if (rndis) + goto skip; + else /* in that case a quirk is mandatory */ + goto bad_desc; + } /* we need a master/control interface (what we're * probed with) and a slave/data interface; union * descriptors sort this all out. @@ -256,7 +262,7 @@ skip: goto bad_desc; } - } else if (!info->header || !info->u || (!rndis && !info->ether)) { + } else if (!info->header || (!rndis && !info->ether)) { dev_dbg(&intf->dev, "missing cdc %s%s%sdescriptor\n", info->header ? "" : "header ", info->u ? 
"" : "union ", diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index 226668ead0d8..1c299b8a162d 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c @@ -36,7 +36,7 @@ #define DRIVER_AUTHOR "WOOJUNG HUH <woojung.huh@microchip.com>" #define DRIVER_DESC "LAN78XX USB 3.0 Gigabit Ethernet Devices" #define DRIVER_NAME "lan78xx" -#define DRIVER_VERSION "1.0.1" +#define DRIVER_VERSION "1.0.2" #define TX_TIMEOUT_JIFFIES (5 * HZ) #define THROTTLE_JIFFIES (HZ / 8) @@ -462,32 +462,53 @@ static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset, u32 length, u8 *data) { u32 val; + u32 saved; int i, ret; + int retval; - ret = lan78xx_eeprom_confirm_not_busy(dev); - if (ret) - return ret; + /* depends on chip, some EEPROM pins are muxed with LED function. + * disable & restore LED function to access EEPROM. + */ + ret = lan78xx_read_reg(dev, HW_CFG, &val); + saved = val; + if ((dev->devid & ID_REV_CHIP_ID_MASK_) == 0x78000000) { + val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_); + ret = lan78xx_write_reg(dev, HW_CFG, val); + } + + retval = lan78xx_eeprom_confirm_not_busy(dev); + if (retval) + return retval; for (i = 0; i < length; i++) { val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_; val |= (offset & E2P_CMD_EPC_ADDR_MASK_); ret = lan78xx_write_reg(dev, E2P_CMD, val); - if (unlikely(ret < 0)) - return -EIO; + if (unlikely(ret < 0)) { + retval = -EIO; + goto exit; + } - ret = lan78xx_wait_eeprom(dev); - if (ret < 0) - return ret; + retval = lan78xx_wait_eeprom(dev); + if (retval < 0) + goto exit; ret = lan78xx_read_reg(dev, E2P_DATA, &val); - if (unlikely(ret < 0)) - return -EIO; + if (unlikely(ret < 0)) { + retval = -EIO; + goto exit; + } data[i] = val & 0xFF; offset++; } - return 0; + retval = 0; +exit: + if ((dev->devid & ID_REV_CHIP_ID_MASK_) == 0x78000000) + ret = lan78xx_write_reg(dev, HW_CFG, saved); + + return retval; } static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset, @@ -509,44 +530,67 @@ static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset, u32 length, u8 *data) { u32 val; + u32 saved; int i, ret; + int retval; - ret = lan78xx_eeprom_confirm_not_busy(dev); - if (ret) - return ret; + /* depends on chip, some EEPROM pins are muxed with LED function. + * disable & restore LED function to access EEPROM. 
+ */ + ret = lan78xx_read_reg(dev, HW_CFG, &val); + saved = val; + if ((dev->devid & ID_REV_CHIP_ID_MASK_) == 0x78000000) { + val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_); + ret = lan78xx_write_reg(dev, HW_CFG, val); + } + + retval = lan78xx_eeprom_confirm_not_busy(dev); + if (retval) + goto exit; /* Issue write/erase enable command */ val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_; ret = lan78xx_write_reg(dev, E2P_CMD, val); - if (unlikely(ret < 0)) - return -EIO; + if (unlikely(ret < 0)) { + retval = -EIO; + goto exit; + } - ret = lan78xx_wait_eeprom(dev); - if (ret < 0) - return ret; + retval = lan78xx_wait_eeprom(dev); + if (retval < 0) + goto exit; for (i = 0; i < length; i++) { /* Fill data register */ val = data[i]; ret = lan78xx_write_reg(dev, E2P_DATA, val); - if (ret < 0) - return ret; + if (ret < 0) { + retval = -EIO; + goto exit; + } /* Send "write" command */ val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_; val |= (offset & E2P_CMD_EPC_ADDR_MASK_); ret = lan78xx_write_reg(dev, E2P_CMD, val); - if (ret < 0) - return ret; + if (ret < 0) { + retval = -EIO; + goto exit; + } - ret = lan78xx_wait_eeprom(dev); - if (ret < 0) - return ret; + retval = lan78xx_wait_eeprom(dev); + if (retval < 0) + goto exit; offset++; } - return 0; + retval = 0; +exit: + if ((dev->devid & ID_REV_CHIP_ID_MASK_) == 0x78000000) + ret = lan78xx_write_reg(dev, HW_CFG, saved); + + return retval; } static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset, @@ -603,6 +647,59 @@ static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset, return 0; } +static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset, + u32 length, u8 *data) +{ + int i; + int ret; + u32 buf; + unsigned long timeout; + + ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf); + + if (buf & OTP_PWR_DN_PWRDN_N_) { + /* clear it and wait to be cleared */ + ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0); + + timeout = jiffies + HZ; + do { + udelay(1); + ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf); + if (time_after(jiffies, timeout)) { + netdev_warn(dev->net, + "timeout on OTP_PWR_DN completion"); + return -EIO; + } + } while (buf & OTP_PWR_DN_PWRDN_N_); + } + + /* set to BYTE program mode */ + ret = lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_); + + for (i = 0; i < length; i++) { + ret = lan78xx_write_reg(dev, OTP_ADDR1, + ((offset + i) >> 8) & OTP_ADDR1_15_11); + ret = lan78xx_write_reg(dev, OTP_ADDR2, + ((offset + i) & OTP_ADDR2_10_3)); + ret = lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]); + ret = lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_); + ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_); + + timeout = jiffies + HZ; + do { + udelay(1); + ret = lan78xx_read_reg(dev, OTP_STATUS, &buf); + if (time_after(jiffies, timeout)) { + netdev_warn(dev->net, + "Timeout on OTP_STATUS completion"); + return -EIO; + } + } while (buf & OTP_STATUS_BUSY_); + } + + return 0; +} + static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset, u32 length, u8 *data) { @@ -851,7 +948,6 @@ static int lan78xx_link_reset(struct lan78xx_net *dev) if (!phydev->link && dev->link_on) { dev->link_on = false; - netif_carrier_off(dev->net); /* reset MAC */ ret = lan78xx_read_reg(dev, MAC_CR, &buf); @@ -861,6 +957,8 @@ static int lan78xx_link_reset(struct lan78xx_net *dev) ret = lan78xx_write_reg(dev, MAC_CR, buf); if (unlikely(ret < 0)) return -EIO; + + phy_mac_interrupt(phydev, 0); } else if (phydev->link && !dev->link_on) { dev->link_on = true; @@ -900,7 +998,7 @@ static int lan78xx_link_reset(struct 
lan78xx_net *dev) ethtool_cmd_speed(&ecmd), ecmd.duplex, ladv, radv); ret = lan78xx_update_flowcontrol(dev, ecmd.duplex, ladv, radv); - netif_carrier_on(dev->net); + phy_mac_interrupt(phydev, 1); } return ret; @@ -969,7 +1067,7 @@ static int lan78xx_ethtool_set_eeprom(struct net_device *netdev, (ee->offset == 0) && (ee->len == 512) && (data[0] == OTP_INDICATOR_1)) - return lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data); + return lan78xx_write_raw_otp(dev, ee->offset, ee->len, data); return -EINVAL; } @@ -1442,7 +1540,6 @@ done: static int lan78xx_mdio_init(struct lan78xx_net *dev) { int ret; - int i; dev->mdiobus = mdiobus_alloc(); if (!dev->mdiobus) { @@ -1458,16 +1555,6 @@ static int lan78xx_mdio_init(struct lan78xx_net *dev) snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d", dev->udev->bus->busnum, dev->udev->devnum); - dev->mdiobus->irq = kzalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); - if (!dev->mdiobus->irq) { - ret = -ENOMEM; - goto exit1; - } - - /* handle our own interrupt */ - for (i = 0; i < PHY_MAX_ADDR; i++) - dev->mdiobus->irq[i] = PHY_IGNORE_INTERRUPT; - switch (dev->devid & ID_REV_CHIP_ID_MASK_) { case 0x78000000: case 0x78500000: @@ -1479,13 +1566,11 @@ static int lan78xx_mdio_init(struct lan78xx_net *dev) ret = mdiobus_register(dev->mdiobus); if (ret) { netdev_err(dev->net, "can't register MDIO bus\n"); - goto exit2; + goto exit1; } netdev_dbg(dev->net, "registered mdiobus bus %s\n", dev->mdiobus->id); return 0; -exit2: - kfree(dev->mdiobus->irq); exit1: mdiobus_free(dev->mdiobus); return ret; @@ -1494,7 +1579,6 @@ exit1: static void lan78xx_remove_mdio(struct lan78xx_net *dev) { mdiobus_unregister(dev->mdiobus); - kfree(dev->mdiobus->irq); mdiobus_free(dev->mdiobus); } @@ -1514,6 +1598,16 @@ static int lan78xx_phy_init(struct lan78xx_net *dev) return -EIO; } + /* Enable PHY interrupts. + * We handle our own interrupt + */ + ret = phy_read(phydev, LAN88XX_INT_STS); + ret = phy_write(phydev, LAN88XX_INT_MASK, + LAN88XX_INT_MASK_MDINTPIN_EN_ | + LAN88XX_INT_MASK_LINK_CHANGE_); + + phydev->irq = PHY_IGNORE_INTERRUPT; + ret = phy_connect_direct(dev->net, phydev, lan78xx_link_status_change, PHY_INTERFACE_MODE_GMII); @@ -1536,14 +1630,6 @@ static int lan78xx_phy_init(struct lan78xx_net *dev) SUPPORTED_Pause | SUPPORTED_Asym_Pause); genphy_config_aneg(phydev); - /* Workaround to enable PHY interrupt. - * phy_start_interrupts() is API for requesting and enabling - * PHY interrupt. However, USB-to-Ethernet device can't use - * request_irq() called in phy_start_interrupts(). 
- * Set PHY to PHY_HALTED and call phy_start() * to make a call to phy_enable_interrupts() */ - phy_stop(phydev); phy_start(phydev); netif_dbg(dev, ifup, dev->net, "phy initialised successfully"); @@ -2177,7 +2263,9 @@ netdev_tx_t lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net) if (skb2) { skb_queue_tail(&dev->txq_pend, skb2); - if (skb_queue_len(&dev->txq_pend) > 10) + /* throttle TX path at slower than SUPER SPEED USB */ + if ((dev->udev->speed < USB_SPEED_SUPER) && + (skb_queue_len(&dev->txq_pend) > 10)) netif_stop_queue(net); } else { netif_dbg(dev, tx_err, dev->net, diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index d0b29733c021..23e9880791fc 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -885,6 +885,8 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x413c, 0x81a9, 8)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */ {QMI_FIXED_INTF(0x413c, 0x81b1, 8)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card */ {QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)}, /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */ + {QMI_FIXED_INTF(0x22de, 0x9061, 3)}, /* WeTelecom WPD-600N */ + {QMI_FIXED_INTF(0x1e0e, 0x9001, 5)}, /* SIMCom 7230E */ /* 4. Gobi 1000 devices */ {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */ diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index e691c7df1ac2..d1f78c2c97aa 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -25,12 +25,13 @@ #include <uapi/linux/mdio.h> #include <linux/mdio.h> #include <linux/usb/cdc.h> +#include <linux/suspend.h> /* Information for net-next */ #define NETNEXT_VERSION "08" /* Information for net */ -#define NET_VERSION "2" +#define NET_VERSION "3" #define DRIVER_VERSION "v1." NETNEXT_VERSION "."
NET_VERSION #define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>" @@ -604,6 +605,9 @@ struct r8152 { struct delayed_work schedule; struct mii_if_info mii; struct mutex control; /* use for hw setting */ +#ifdef CONFIG_PM_SLEEP + struct notifier_block pm_notifier; +#endif struct rtl_ops { void (*init)(struct r8152 *); @@ -1938,7 +1942,6 @@ static void _rtl8152_set_rx_mode(struct net_device *netdev) __le32 tmp[2]; u32 ocp_data; - clear_bit(RTL8152_SET_RX_MODE, &tp->flags); netif_stop_queue(netdev); ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR); ocp_data &= ~RCR_ACPT_ALL; @@ -2424,8 +2427,6 @@ static void rtl_phy_reset(struct r8152 *tp) u16 data; int i; - clear_bit(PHY_RESET, &tp->flags); - data = r8152_mdio_read(tp, MII_BMCR); /* don't reset again before the previous one complete */ @@ -2455,23 +2456,23 @@ static void r8153_teredo_off(struct r8152 *tp) ocp_write_dword(tp, MCU_TYPE_PLA, PLA_TEREDO_TIMER, 0); } -static void r8152b_disable_aldps(struct r8152 *tp) -{ - ocp_reg_write(tp, OCP_ALDPS_CONFIG, ENPDNPS | LINKENA | DIS_SDSAVE); - msleep(20); -} - -static inline void r8152b_enable_aldps(struct r8152 *tp) +static void r8152_aldps_en(struct r8152 *tp, bool enable) { - ocp_reg_write(tp, OCP_ALDPS_CONFIG, ENPWRSAVE | ENPDNPS | - LINKENA | DIS_SDSAVE); + if (enable) { + ocp_reg_write(tp, OCP_ALDPS_CONFIG, ENPWRSAVE | ENPDNPS | + LINKENA | DIS_SDSAVE); + } else { + ocp_reg_write(tp, OCP_ALDPS_CONFIG, ENPDNPS | LINKENA | + DIS_SDSAVE); + msleep(20); + } } static void rtl8152_disable(struct r8152 *tp) { - r8152b_disable_aldps(tp); + r8152_aldps_en(tp, false); rtl_disable(tp); - r8152b_enable_aldps(tp); + r8152_aldps_en(tp, true); } static void r8152b_hw_phy_cfg(struct r8152 *tp) @@ -2783,30 +2784,26 @@ static void r8153_enter_oob(struct r8152 *tp) ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data); } -static void r8153_disable_aldps(struct r8152 *tp) +static void r8153_aldps_en(struct r8152 *tp, bool enable) { u16 data; data = ocp_reg_read(tp, OCP_POWER_CFG); - data &= ~EN_ALDPS; - ocp_reg_write(tp, OCP_POWER_CFG, data); - msleep(20); -} - -static void r8153_enable_aldps(struct r8152 *tp) -{ - u16 data; - - data = ocp_reg_read(tp, OCP_POWER_CFG); - data |= EN_ALDPS; - ocp_reg_write(tp, OCP_POWER_CFG, data); + if (enable) { + data |= EN_ALDPS; + ocp_reg_write(tp, OCP_POWER_CFG, data); + } else { + data &= ~EN_ALDPS; + ocp_reg_write(tp, OCP_POWER_CFG, data); + msleep(20); + } } static void rtl8153_disable(struct r8152 *tp) { - r8153_disable_aldps(tp); + r8153_aldps_en(tp, false); rtl_disable(tp); - r8153_enable_aldps(tp); + r8153_aldps_en(tp, true); usb_enable_lpm(tp->udev); } @@ -2884,10 +2881,9 @@ static int rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u16 speed, u8 duplex) r8152_mdio_write(tp, MII_ADVERTISE, anar); r8152_mdio_write(tp, MII_BMCR, bmcr); - if (test_bit(PHY_RESET, &tp->flags)) { + if (test_and_clear_bit(PHY_RESET, &tp->flags)) { int i; - clear_bit(PHY_RESET, &tp->flags); for (i = 0; i < 50; i++) { msleep(20); if ((r8152_mdio_read(tp, MII_BMCR) & BMCR_RESET) == 0) @@ -2896,7 +2892,6 @@ static int rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u16 speed, u8 duplex) } out: - return ret; } @@ -2905,9 +2900,9 @@ static void rtl8152_up(struct r8152 *tp) if (test_bit(RTL8152_UNPLUG, &tp->flags)) return; - r8152b_disable_aldps(tp); + r8152_aldps_en(tp, false); r8152b_exit_oob(tp); - r8152b_enable_aldps(tp); + r8152_aldps_en(tp, true); } static void rtl8152_down(struct r8152 *tp) @@ -2918,9 +2913,9 @@ static void rtl8152_down(struct r8152 *tp) } 
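/*
 * [Editor's note: annotation added for this digest, not a line of the
 * r8152 patch itself.] Several hunks nearby replace a separate
 * test_bit()/clear_bit() pair with test_and_clear_bit(). The pair
 * leaves a window between the read and the clear in which another
 * context can set the flag again and have that event silently wiped;
 * the fused operation reads and clears in one atomic step, so a
 * concurrently set flag is either handled now or survives for the next
 * pass. A minimal userspace sketch of the same idea using C11 atomics
 * (all names here are illustrative, not the driver's):
 */
#include <stdatomic.h>
#include <stdbool.h>

static atomic_uint event_flags;

/* Atomically fetch and clear one flag bit; true if it was set. */
static bool test_and_clear_flag(unsigned int bit)
{
	unsigned int mask = 1u << bit;

	return atomic_fetch_and(&event_flags, ~mask) & mask;
}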
r8152_power_cut_en(tp, false); - r8152b_disable_aldps(tp); + r8152_aldps_en(tp, false); r8152b_enter_oob(tp); - r8152b_enable_aldps(tp); + r8152_aldps_en(tp, true); } static void rtl8153_up(struct r8152 *tp) @@ -2929,9 +2924,9 @@ static void rtl8153_up(struct r8152 *tp) return; r8153_u1u2en(tp, false); - r8153_disable_aldps(tp); + r8153_aldps_en(tp, false); r8153_first_init(tp); - r8153_enable_aldps(tp); + r8153_aldps_en(tp, true); r8153_u2p3en(tp, true); r8153_u1u2en(tp, true); usb_enable_lpm(tp->udev); @@ -2947,9 +2942,9 @@ static void rtl8153_down(struct r8152 *tp) r8153_u1u2en(tp, false); r8153_u2p3en(tp, false); r8153_power_cut_en(tp, false); - r8153_disable_aldps(tp); + r8153_aldps_en(tp, false); r8153_enter_oob(tp); - r8153_enable_aldps(tp); + r8153_aldps_en(tp, true); } static bool rtl8152_in_nway(struct r8152 *tp) @@ -2983,7 +2978,6 @@ static void set_carrier(struct r8152 *tp) struct net_device *netdev = tp->netdev; u8 speed; - clear_bit(RTL8152_LINK_CHG, &tp->flags); speed = rtl8152_get_speed(tp); if (speed & LINK_STATUS) { @@ -3026,20 +3020,18 @@ static void rtl_work_func_t(struct work_struct *work) goto out1; } - if (test_bit(RTL8152_LINK_CHG, &tp->flags)) + if (test_and_clear_bit(RTL8152_LINK_CHG, &tp->flags)) set_carrier(tp); - if (test_bit(RTL8152_SET_RX_MODE, &tp->flags)) + if (test_and_clear_bit(RTL8152_SET_RX_MODE, &tp->flags)) _rtl8152_set_rx_mode(tp->netdev); /* don't schedule napi before linking */ - if (test_bit(SCHEDULE_NAPI, &tp->flags) && - netif_carrier_ok(tp->netdev)) { - clear_bit(SCHEDULE_NAPI, &tp->flags); + if (test_and_clear_bit(SCHEDULE_NAPI, &tp->flags) && + netif_carrier_ok(tp->netdev)) napi_schedule(&tp->napi); - } - if (test_bit(PHY_RESET, &tp->flags)) + if (test_and_clear_bit(PHY_RESET, &tp->flags)) rtl_phy_reset(tp); mutex_unlock(&tp->control); @@ -3048,6 +3040,33 @@ out1: usb_autopm_put_interface(tp->intf); } +#ifdef CONFIG_PM_SLEEP +static int rtl_notifier(struct notifier_block *nb, unsigned long action, + void *data) +{ + struct r8152 *tp = container_of(nb, struct r8152, pm_notifier); + + switch (action) { + case PM_HIBERNATION_PREPARE: + case PM_SUSPEND_PREPARE: + usb_autopm_get_interface(tp->intf); + break; + + case PM_POST_HIBERNATION: + case PM_POST_SUSPEND: + usb_autopm_put_interface(tp->intf); + break; + + case PM_POST_RESTORE: + case PM_RESTORE_PREPARE: + default: + break; + } + + return NOTIFY_DONE; +} +#endif + static int rtl8152_open(struct net_device *netdev) { struct r8152 *tp = netdev_priv(netdev); @@ -3090,6 +3109,10 @@ static int rtl8152_open(struct net_device *netdev) mutex_unlock(&tp->control); usb_autopm_put_interface(tp->intf); +#ifdef CONFIG_PM_SLEEP + tp->pm_notifier.notifier_call = rtl_notifier; + register_pm_notifier(&tp->pm_notifier); +#endif out: return res; @@ -3100,6 +3123,9 @@ static int rtl8152_close(struct net_device *netdev) struct r8152 *tp = netdev_priv(netdev); int res = 0; +#ifdef CONFIG_PM_SLEEP + unregister_pm_notifier(&tp->pm_notifier); +#endif napi_disable(&tp->napi); clear_bit(WORK_ENABLE, &tp->flags); usb_kill_urb(tp->intr_urb); @@ -3238,7 +3264,7 @@ static void r8152b_init(struct r8152 *tp) if (test_bit(RTL8152_UNPLUG, &tp->flags)) return; - r8152b_disable_aldps(tp); + r8152_aldps_en(tp, false); if (tp->version == RTL_VER_01) { ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_LED_FEATURE); @@ -3260,7 +3286,7 @@ static void r8152b_init(struct r8152 *tp) ocp_write_word(tp, MCU_TYPE_PLA, PLA_GPHY_INTR_IMR, ocp_data); r8152b_enable_eee(tp); - r8152b_enable_aldps(tp); + r8152_aldps_en(tp, true); 
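/*
 * [Editor's note: annotation added for this digest, not a line of the
 * r8152 patch itself.] The rtl_notifier/rtl8152_open hunks above hook
 * the system sleep path with a PM notifier: usb_autopm_get_interface()
 * on PM_SUSPEND_PREPARE / PM_HIBERNATION_PREPARE keeps the interface
 * resumed so runtime PM cannot race the system transition, and the
 * matching put on the PM_POST_* events drops that reference. The
 * skeleton of such a notifier, with placeholder names and error
 * handling elided, looks roughly like this:
 */
#include <linux/notifier.h>
#include <linux/suspend.h>
#include <linux/usb.h>

struct demo_dev {
	struct usb_interface *intf;
	struct notifier_block pm_nb;	/* register in open(), unregister in close() */
};

static int demo_pm_notify(struct notifier_block *nb, unsigned long action,
			  void *data)
{
	struct demo_dev *dd = container_of(nb, struct demo_dev, pm_nb);

	switch (action) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
		usb_autopm_get_interface(dd->intf);	/* hold a runtime-PM reference */
		break;
	case PM_POST_HIBERNATION:
	case PM_POST_SUSPEND:
		usb_autopm_put_interface(dd->intf);	/* release it after resume */
		break;
	default:
		break;
	}

	return NOTIFY_DONE;
}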
r8152b_enable_fc(tp); rtl_tally_reset(tp); @@ -3278,7 +3304,7 @@ static void r8153_init(struct r8152 *tp) if (test_bit(RTL8152_UNPLUG, &tp->flags)) return; - r8153_disable_aldps(tp); + r8153_aldps_en(tp, false); r8153_u1u2en(tp, false); for (i = 0; i < 500; i++) { @@ -3367,7 +3393,7 @@ static void r8153_init(struct r8152 *tp) EEE_SPDWN_EN); r8153_enable_eee(tp); - r8153_enable_aldps(tp); + r8153_aldps_en(tp, true); r8152b_enable_fc(tp); rtl_tally_reset(tp); r8153_u2p3en(tp, true); @@ -3525,6 +3551,14 @@ static int rtl8152_resume(struct usb_interface *intf) return 0; } +static int rtl8152_reset_resume(struct usb_interface *intf) +{ + struct r8152 *tp = usb_get_intfdata(intf); + + clear_bit(SELECTIVE_SUSPEND, &tp->flags); + return rtl8152_resume(intf); +} + static void rtl8152_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { struct r8152 *tp = netdev_priv(dev); @@ -4276,7 +4310,7 @@ static struct usb_driver rtl8152_driver = { .disconnect = rtl8152_disconnect, .suspend = rtl8152_suspend, .resume = rtl8152_resume, - .reset_resume = rtl8152_resume, + .reset_resume = rtl8152_reset_resume, .pre_reset = rtl8152_pre_reset, .post_reset = rtl8152_post_reset, .supports_autosuspend = 1, diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 767ab11a6e9f..fb0eae42bf39 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -146,6 +146,10 @@ struct virtnet_info { virtio_net_ctrl_ack ctrl_status; u8 ctrl_promisc; u8 ctrl_allmulti; + + /* Ethtool settings */ + u8 duplex; + u32 speed; }; struct padded_vnet_hdr { @@ -1376,6 +1380,60 @@ static void virtnet_get_channels(struct net_device *dev, channels->other_count = 0; } +/* Check if the user is trying to change anything besides speed/duplex */ +static bool virtnet_validate_ethtool_cmd(const struct ethtool_cmd *cmd) +{ + struct ethtool_cmd diff1 = *cmd; + struct ethtool_cmd diff2 = {}; + + /* cmd is always set so we need to clear it, validate the port type + * and also without autonegotiation we can ignore advertising + */ + ethtool_cmd_speed_set(&diff1, 0); + diff2.port = PORT_OTHER; + diff1.advertising = 0; + diff1.duplex = 0; + diff1.cmd = 0; + + return !memcmp(&diff1, &diff2, sizeof(diff1)); +} + +static int virtnet_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct virtnet_info *vi = netdev_priv(dev); + u32 speed; + + speed = ethtool_cmd_speed(cmd); + /* don't allow custom speed and duplex */ + if (!ethtool_validate_speed(speed) || + !ethtool_validate_duplex(cmd->duplex) || + !virtnet_validate_ethtool_cmd(cmd)) + return -EINVAL; + vi->speed = speed; + vi->duplex = cmd->duplex; + + return 0; +} + +static int virtnet_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct virtnet_info *vi = netdev_priv(dev); + + ethtool_cmd_speed_set(cmd, vi->speed); + cmd->duplex = vi->duplex; + cmd->port = PORT_OTHER; + + return 0; +} + +static void virtnet_init_settings(struct net_device *dev) +{ + struct virtnet_info *vi = netdev_priv(dev); + + vi->speed = SPEED_UNKNOWN; + vi->duplex = DUPLEX_UNKNOWN; +} + static const struct ethtool_ops virtnet_ethtool_ops = { .get_drvinfo = virtnet_get_drvinfo, .get_link = ethtool_op_get_link, @@ -1383,6 +1441,8 @@ static const struct ethtool_ops virtnet_ethtool_ops = { .set_channels = virtnet_set_channels, .get_channels = virtnet_get_channels, .get_ts_info = ethtool_op_get_ts_info, + .get_settings = virtnet_get_settings, + .set_settings = virtnet_set_settings, }; #define MIN_MTU 68 @@ -1855,6 +1915,8 @@ static int virtnet_probe(struct 
virtio_device *vdev) netif_set_real_num_tx_queues(dev, vi->curr_queue_pairs); netif_set_real_num_rx_queues(dev, vi->curr_queue_pairs); + virtnet_init_settings(dev); + err = register_netdev(dev); if (err) { pr_debug("virtio_net: registering device failed\n"); diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index 417903715437..0cbf520cea77 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c @@ -1380,10 +1380,10 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq, skip_page_frags = true; goto rcd_done; } - new_dma_addr = dma_map_page(&adapter->pdev->dev , rbi->page, - 0, PAGE_SIZE, - PCI_DMA_FROMDEVICE); + new_dma_addr = dma_map_page(&adapter->pdev->dev, + new_page, + 0, PAGE_SIZE, + PCI_DMA_FROMDEVICE); if (dma_mapping_error(&adapter->pdev->dev, new_dma_addr)) { put_page(new_page); diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h index 4c58c83dc225..bdb8a6c0f8aa 100644 --- a/drivers/net/vmxnet3/vmxnet3_int.h +++ b/drivers/net/vmxnet3/vmxnet3_int.h @@ -69,10 +69,10 @@ /* * Version numbers */ -#define VMXNET3_DRIVER_VERSION_STRING "1.4.4.0-k" +#define VMXNET3_DRIVER_VERSION_STRING "1.4.5.0-k" /* a 32-bit int, each byte encodes a version number in VMXNET3_DRIVER_VERSION */ -#define VMXNET3_DRIVER_VERSION_NUM 0x01040400 +#define VMXNET3_DRIVER_VERSION_NUM 0x01040500 #if defined(CONFIG_PCI_MSI) /* RSS only makes sense if MSI-X is supported. */ diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c index 56abdf224d35..9ce088bb28ab 100644 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c @@ -32,7 +32,6 @@ #include <net/ip_fib.h> #include <net/ip6_fib.h> #include <net/ip6_route.h> -#include <net/rtnetlink.h> #include <net/route.h> #include <net/addrconf.h> #include <net/l3mdev.h> @@ -742,7 +741,7 @@ static struct rtable *vrf_get_rtable(const struct net_device *dev, } /* called under rcu_read_lock */ -static void vrf_get_saddr(struct net_device *dev, struct flowi4 *fl4) +static int vrf_get_saddr(struct net_device *dev, struct flowi4 *fl4) { struct fib_result res = { .tclassid = 0 }; struct net *net = dev_net(dev); @@ -750,9 +749,10 @@ static void vrf_get_saddr(struct net_device *dev, struct flowi4 *fl4) u8 flags = fl4->flowi4_flags; u8 scope = fl4->flowi4_scope; u8 tos = RT_FL_TOS(fl4); + int rc; if (unlikely(!fl4->daddr)) - return; + return 0; fl4->flowi4_flags |= FLOWI_FLAG_SKIP_NH_OIF; fl4->flowi4_iif = LOOPBACK_IFINDEX; @@ -760,7 +760,8 @@ static void vrf_get_saddr(struct net_device *dev, struct flowi4 *fl4) fl4->flowi4_scope = ((tos & RTO_ONLINK) ? RT_SCOPE_LINK : RT_SCOPE_UNIVERSE); - if (!fib_lookup(net, fl4, &res, 0)) { + rc = fib_lookup(net, fl4, &res, 0); + if (!rc) { if (res.type == RTN_LOCAL) fl4->saddr = res.fi->fib_prefsrc ?
: fl4->daddr; else @@ -770,6 +771,8 @@ static void vrf_get_saddr(struct net_device *dev, struct flowi4 *fl4) fl4->flowi4_flags = flags; fl4->flowi4_tos = orig_tos; fl4->flowi4_scope = scope; + + return rc; } #if IS_ENABLED(CONFIG_IPV6) @@ -873,6 +876,24 @@ static int vrf_fillinfo(struct sk_buff *skb, return nla_put_u32(skb, IFLA_VRF_TABLE, vrf->tb_id); } +static size_t vrf_get_slave_size(const struct net_device *bond_dev, + const struct net_device *slave_dev) +{ + return nla_total_size(sizeof(u32)); /* IFLA_VRF_PORT_TABLE */ +} + +static int vrf_fill_slave_info(struct sk_buff *skb, + const struct net_device *vrf_dev, + const struct net_device *slave_dev) +{ + struct net_vrf *vrf = netdev_priv(vrf_dev); + + if (nla_put_u32(skb, IFLA_VRF_PORT_TABLE, vrf->tb_id)) + return -EMSGSIZE; + + return 0; +} + static const struct nla_policy vrf_nl_policy[IFLA_VRF_MAX + 1] = { [IFLA_VRF_TABLE] = { .type = NLA_U32 }, }; @@ -886,6 +907,9 @@ static struct rtnl_link_ops vrf_link_ops __read_mostly = { .validate = vrf_validate, .fill_info = vrf_fillinfo, + .get_slave_size = vrf_get_slave_size, + .fill_slave_info = vrf_fill_slave_info, + .newlink = vrf_newlink, .dellink = vrf_dellink, .setup = vrf_setup, diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index fecf7b6c732e..c963897e713d 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -42,7 +42,7 @@ #include <net/netns/generic.h> #include <net/vxlan.h> #include <net/protocol.h> -#include <net/udp_tunnel.h> + #if IS_ENABLED(CONFIG_IPV6) #include <net/ipv6.h> #include <net/addrconf.h> @@ -73,7 +73,7 @@ MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN"); static int vxlan_net_id; static struct rtnl_link_ops vxlan_link_ops; -static const u8 all_zeros_mac[ETH_ALEN]; +static const u8 all_zeros_mac[ETH_ALEN + 2]; static int vxlan_sock_add(struct vxlan_dev *vxlan); @@ -197,9 +197,9 @@ static int vxlan_nla_put_addr(struct sk_buff *skb, int attr, #endif /* Virtual Network hash table head */ -static inline struct hlist_head *vni_head(struct vxlan_sock *vs, u32 id) +static inline struct hlist_head *vni_head(struct vxlan_sock *vs, __be32 vni) { - return &vs->vni_list[hash_32(id, VNI_HASH_BITS)]; + return &vs->vni_list[hash_32((__force u32)vni, VNI_HASH_BITS)]; } /* Socket hash table head */ @@ -242,12 +242,16 @@ static struct vxlan_sock *vxlan_find_sock(struct net *net, sa_family_t family, return NULL; } -static struct vxlan_dev *vxlan_vs_find_vni(struct vxlan_sock *vs, u32 id) +static struct vxlan_dev *vxlan_vs_find_vni(struct vxlan_sock *vs, __be32 vni) { struct vxlan_dev *vxlan; - hlist_for_each_entry_rcu(vxlan, vni_head(vs, id), hlist) { - if (vxlan->default_dst.remote_vni == id) + /* For flow based devices, map all packets to VNI 0 */ + if (vs->flags & VXLAN_F_COLLECT_METADATA) + vni = 0; + + hlist_for_each_entry_rcu(vxlan, vni_head(vs, vni), hlist) { + if (vxlan->default_dst.remote_vni == vni) return vxlan; } @@ -255,7 +259,7 @@ static struct vxlan_dev *vxlan_vs_find_vni(struct vxlan_sock *vs, u32 id) } /* Look up VNI in a per net namespace table */ -static struct vxlan_dev *vxlan_find_vni(struct net *net, u32 id, +static struct vxlan_dev *vxlan_find_vni(struct net *net, __be32 vni, sa_family_t family, __be16 port, u32 flags) { @@ -265,7 +269,7 @@ static struct vxlan_dev *vxlan_find_vni(struct net *net, u32 id, if (!vs) return NULL; - return vxlan_vs_find_vni(vs, id); + return vxlan_vs_find_vni(vs, vni); } /* Fill in neighbour message in skbuff. 
*/ @@ -315,7 +319,7 @@ static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan, nla_put_be16(skb, NDA_PORT, rdst->remote_port)) goto nla_put_failure; if (rdst->remote_vni != vxlan->default_dst.remote_vni && - nla_put_u32(skb, NDA_VNI, rdst->remote_vni)) + nla_put_u32(skb, NDA_VNI, be32_to_cpu(rdst->remote_vni))) goto nla_put_failure; if (rdst->remote_ifindex && nla_put_u32(skb, NDA_IFINDEX, rdst->remote_ifindex)) @@ -383,7 +387,7 @@ static void vxlan_ip_miss(struct net_device *dev, union vxlan_addr *ipa) }; struct vxlan_rdst remote = { .remote_ip = *ipa, /* goes to NDA_DST */ - .remote_vni = VXLAN_N_VID, + .remote_vni = cpu_to_be32(VXLAN_N_VID), }; vxlan_fdb_notify(vxlan, &f, &remote, RTM_GETNEIGH); @@ -452,7 +456,7 @@ static struct vxlan_fdb *vxlan_find_mac(struct vxlan_dev *vxlan, /* caller should hold vxlan->hash_lock */ static struct vxlan_rdst *vxlan_fdb_find_rdst(struct vxlan_fdb *f, union vxlan_addr *ip, __be16 port, - __u32 vni, __u32 ifindex) + __be32 vni, __u32 ifindex) { struct vxlan_rdst *rd; @@ -469,7 +473,8 @@ static struct vxlan_rdst *vxlan_fdb_find_rdst(struct vxlan_fdb *f, /* Replace destination of unicast mac */ static int vxlan_fdb_replace(struct vxlan_fdb *f, - union vxlan_addr *ip, __be16 port, __u32 vni, __u32 ifindex) + union vxlan_addr *ip, __be16 port, __be32 vni, + __u32 ifindex) { struct vxlan_rdst *rd; @@ -480,6 +485,8 @@ static int vxlan_fdb_replace(struct vxlan_fdb *f, rd = list_first_entry_or_null(&f->remotes, struct vxlan_rdst, list); if (!rd) return 0; + + dst_cache_reset(&rd->dst_cache); rd->remote_ip = *ip; rd->remote_port = port; rd->remote_vni = vni; @@ -489,7 +496,7 @@ static int vxlan_fdb_replace(struct vxlan_fdb *f, /* Add/update destinations for multicast */ static int vxlan_fdb_append(struct vxlan_fdb *f, - union vxlan_addr *ip, __be16 port, __u32 vni, + union vxlan_addr *ip, __be16 port, __be32 vni, __u32 ifindex, struct vxlan_rdst **rdp) { struct vxlan_rdst *rd; @@ -501,6 +508,12 @@ static int vxlan_fdb_append(struct vxlan_fdb *f, rd = kmalloc(sizeof(*rd), GFP_ATOMIC); if (rd == NULL) return -ENOBUFS; + + if (dst_cache_init(&rd->dst_cache, GFP_ATOMIC)) { + kfree(rd); + return -ENOBUFS; + } + rd->remote_ip = *ip; rd->remote_port = port; rd->remote_vni = vni; @@ -515,7 +528,8 @@ static int vxlan_fdb_append(struct vxlan_fdb *f, static struct vxlanhdr *vxlan_gro_remcsum(struct sk_buff *skb, unsigned int off, struct vxlanhdr *vh, size_t hdrlen, - u32 data, struct gro_remcsum *grc, + __be32 vni_field, + struct gro_remcsum *grc, bool nopartial) { size_t start, offset; @@ -526,10 +540,8 @@ static struct vxlanhdr *vxlan_gro_remcsum(struct sk_buff *skb, if (!NAPI_GRO_CB(skb)->csum_valid) return NULL; - start = (data & VXLAN_RCO_MASK) << VXLAN_RCO_SHIFT; - offset = start + ((data & VXLAN_RCO_UDP) ? 
- offsetof(struct udphdr, check) : - offsetof(struct tcphdr, check)); + start = vxlan_rco_start(vni_field); + offset = start + vxlan_rco_offset(vni_field); vh = skb_gro_remcsum_process(skb, (void *)vh, off, hdrlen, start, offset, grc, nopartial); @@ -549,7 +561,7 @@ static struct sk_buff **vxlan_gro_receive(struct sk_buff **head, int flush = 1; struct vxlan_sock *vs = container_of(uoff, struct vxlan_sock, udp_offloads); - u32 flags; + __be32 flags; struct gro_remcsum grc; skb_gro_remcsum_init(&grc); @@ -565,11 +577,11 @@ static struct sk_buff **vxlan_gro_receive(struct sk_buff **head, skb_gro_postpull_rcsum(skb, vh, sizeof(struct vxlanhdr)); - flags = ntohl(vh->vx_flags); + flags = vh->vx_flags; if ((flags & VXLAN_HF_RCO) && (vs->flags & VXLAN_F_REMCSUM_RX)) { vh = vxlan_gro_remcsum(skb, off_vx, vh, sizeof(struct vxlanhdr), - ntohl(vh->vx_vni), &grc, + vh->vx_vni, &grc, !!(vs->flags & VXLAN_F_REMCSUM_NOPARTIAL)); @@ -621,7 +633,7 @@ static void vxlan_notify_add_rx_port(struct vxlan_sock *vs) int err; if (sa_family == AF_INET) { - err = udp_add_offload(&vs->udp_offloads); + err = udp_add_offload(net, &vs->udp_offloads); if (err) pr_warn("vxlan: udp_add_offload failed with status %d\n", err); } @@ -660,7 +672,7 @@ static void vxlan_notify_del_rx_port(struct vxlan_sock *vs) static int vxlan_fdb_create(struct vxlan_dev *vxlan, const u8 *mac, union vxlan_addr *ip, __u16 state, __u16 flags, - __be16 port, __u32 vni, __u32 ifindex, + __be16 port, __be32 vni, __u32 ifindex, __u8 ndm_flags) { struct vxlan_rdst *rd = NULL; @@ -749,8 +761,10 @@ static void vxlan_fdb_free(struct rcu_head *head) struct vxlan_fdb *f = container_of(head, struct vxlan_fdb, rcu); struct vxlan_rdst *rd, *nd; - list_for_each_entry_safe(rd, nd, &f->remotes, list) + list_for_each_entry_safe(rd, nd, &f->remotes, list) { + dst_cache_destroy(&rd->dst_cache); kfree(rd); + } kfree(f); } @@ -767,7 +781,8 @@ static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f) } static int vxlan_fdb_parse(struct nlattr *tb[], struct vxlan_dev *vxlan, - union vxlan_addr *ip, __be16 *port, u32 *vni, u32 *ifindex) + union vxlan_addr *ip, __be16 *port, __be32 *vni, + u32 *ifindex) { struct net *net = dev_net(vxlan->dev); int err; @@ -800,7 +815,7 @@ static int vxlan_fdb_parse(struct nlattr *tb[], struct vxlan_dev *vxlan, if (tb[NDA_VNI]) { if (nla_len(tb[NDA_VNI]) != sizeof(u32)) return -EINVAL; - *vni = nla_get_u32(tb[NDA_VNI]); + *vni = cpu_to_be32(nla_get_u32(tb[NDA_VNI])); } else { *vni = vxlan->default_dst.remote_vni; } @@ -830,7 +845,8 @@ static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], /* struct net *net = dev_net(vxlan->dev); */ union vxlan_addr ip; __be16 port; - u32 vni, ifindex; + __be32 vni; + u32 ifindex; int err; if (!(ndm->ndm_state & (NUD_PERMANENT|NUD_REACHABLE))) { @@ -867,7 +883,8 @@ static int vxlan_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[], struct vxlan_rdst *rd = NULL; union vxlan_addr ip; __be16 port; - u32 vni, ifindex; + __be32 vni; + u32 ifindex; int err; err = vxlan_fdb_parse(tb, vxlan, &ip, &port, &vni, &ifindex); @@ -1122,54 +1139,65 @@ static int vxlan_igmp_leave(struct vxlan_dev *vxlan) return ret; } -static struct vxlanhdr *vxlan_remcsum(struct sk_buff *skb, struct vxlanhdr *vh, - size_t hdrlen, u32 data, bool nopartial) +static bool vxlan_remcsum(struct vxlanhdr *unparsed, + struct sk_buff *skb, u32 vxflags) { size_t start, offset, plen; - if (skb->remcsum_offload) - return vh; + if (!(unparsed->vx_flags & VXLAN_HF_RCO) || skb->remcsum_offload) + goto out; - start = (data & 
VXLAN_RCO_MASK) << VXLAN_RCO_SHIFT; - offset = start + ((data & VXLAN_RCO_UDP) ? - offsetof(struct udphdr, check) : - offsetof(struct tcphdr, check)); + start = vxlan_rco_start(unparsed->vx_vni); + offset = start + vxlan_rco_offset(unparsed->vx_vni); - plen = hdrlen + offset + sizeof(u16); + plen = sizeof(struct vxlanhdr) + offset + sizeof(u16); if (!pskb_may_pull(skb, plen)) - return NULL; + return false; - vh = (struct vxlanhdr *)(udp_hdr(skb) + 1); + skb_remcsum_process(skb, (void *)(vxlan_hdr(skb) + 1), start, offset, + !!(vxflags & VXLAN_F_REMCSUM_NOPARTIAL)); +out: + unparsed->vx_flags &= ~VXLAN_HF_RCO; + unparsed->vx_vni &= VXLAN_VNI_MASK; + return true; +} - skb_remcsum_process(skb, (void *)vh + hdrlen, start, offset, - nopartial); +static void vxlan_parse_gbp_hdr(struct vxlanhdr *unparsed, + struct vxlan_metadata *md, + struct metadata_dst *tun_dst) +{ + struct vxlanhdr_gbp *gbp = (struct vxlanhdr_gbp *)unparsed; - return vh; + if (!(unparsed->vx_flags & VXLAN_HF_GBP)) + goto out; + + md->gbp = ntohs(gbp->policy_id); + + if (tun_dst) + tun_dst->u.tun_info.key.tun_flags |= TUNNEL_VXLAN_OPT; + + if (gbp->dont_learn) + md->gbp |= VXLAN_GBP_DONT_LEARN; + + if (gbp->policy_applied) + md->gbp |= VXLAN_GBP_POLICY_APPLIED; + +out: + unparsed->vx_flags &= ~VXLAN_GBP_USED_BITS; } -static void vxlan_rcv(struct vxlan_sock *vs, struct sk_buff *skb, - struct vxlan_metadata *md, u32 vni, +static void vxlan_rcv(struct vxlan_dev *vxlan, struct vxlan_sock *vs, + struct sk_buff *skb, struct vxlan_metadata *md, struct metadata_dst *tun_dst) { struct iphdr *oip = NULL; struct ipv6hdr *oip6 = NULL; - struct vxlan_dev *vxlan; struct pcpu_sw_netstats *stats; union vxlan_addr saddr; int err = 0; - /* For flow based devices, map all packets to VNI 0 */ - if (vs->flags & VXLAN_F_COLLECT_METADATA) - vni = 0; - - /* Is this VNI defined? 
*/ - vxlan = vxlan_vs_find_vni(vs, vni); - if (!vxlan) - goto drop; - skb_reset_mac_header(skb); - skb_scrub_packet(skb, !net_eq(vxlan->net, dev_net(vxlan->dev))); skb->protocol = eth_type_trans(skb, vxlan->dev); skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN); @@ -1246,48 +1274,45 @@ drop: static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb) { struct metadata_dst *tun_dst = NULL; + struct vxlan_dev *vxlan; struct vxlan_sock *vs; - struct vxlanhdr *vxh; - u32 flags, vni; + struct vxlanhdr unparsed; struct vxlan_metadata _md; struct vxlan_metadata *md = &_md; /* Need Vxlan and inner Ethernet header to be present */ if (!pskb_may_pull(skb, VXLAN_HLEN)) - goto error; - - vxh = (struct vxlanhdr *)(udp_hdr(skb) + 1); - flags = ntohl(vxh->vx_flags); - vni = ntohl(vxh->vx_vni); + return 1; - if (flags & VXLAN_HF_VNI) { - flags &= ~VXLAN_HF_VNI; - } else { - /* VNI flag always required to be set */ - goto bad_flags; + unparsed = *vxlan_hdr(skb); + /* VNI flag always required to be set */ + if (!(unparsed.vx_flags & VXLAN_HF_VNI)) { + netdev_dbg(skb->dev, "invalid vxlan flags=%#x vni=%#x\n", + ntohl(vxlan_hdr(skb)->vx_flags), + ntohl(vxlan_hdr(skb)->vx_vni)); + /* Return non vxlan pkt */ + return 1; } - - if (iptunnel_pull_header(skb, VXLAN_HLEN, htons(ETH_P_TEB))) - goto drop; - vxh = (struct vxlanhdr *)(udp_hdr(skb) + 1); + unparsed.vx_flags &= ~VXLAN_HF_VNI; + unparsed.vx_vni &= ~VXLAN_VNI_MASK; vs = rcu_dereference_sk_user_data(sk); if (!vs) goto drop; - if ((flags & VXLAN_HF_RCO) && (vs->flags & VXLAN_F_REMCSUM_RX)) { - vxh = vxlan_remcsum(skb, vxh, sizeof(struct vxlanhdr), vni, - !!(vs->flags & VXLAN_F_REMCSUM_NOPARTIAL)); - if (!vxh) - goto drop; + vxlan = vxlan_vs_find_vni(vs, vxlan_vni(vxlan_hdr(skb)->vx_vni)); + if (!vxlan) + goto drop; - flags &= ~VXLAN_HF_RCO; - vni &= VXLAN_VNI_MASK; - } + if (iptunnel_pull_header(skb, VXLAN_HLEN, htons(ETH_P_TEB), + !net_eq(vxlan->net, dev_net(vxlan->dev)))) + goto drop; if (vxlan_collect_metadata(vs)) { + __be32 vni = vxlan_vni(vxlan_hdr(skb)->vx_vni); + tun_dst = udp_tun_rx_dst(skb, vxlan_get_sk_family(vs), TUNNEL_KEY, - cpu_to_be64(vni >> 8), sizeof(*md)); + vxlan_vni_to_tun_id(vni), sizeof(*md)); if (!tun_dst) goto drop; @@ -1300,25 +1325,13 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb) /* For backwards compatibility, only allow reserved fields to be * used by VXLAN extensions if explicitly requested. */ - if ((flags & VXLAN_HF_GBP) && (vs->flags & VXLAN_F_GBP)) { - struct vxlanhdr_gbp *gbp; - - gbp = (struct vxlanhdr_gbp *)vxh; - md->gbp = ntohs(gbp->policy_id); - - if (tun_dst) - tun_dst->u.tun_info.key.tun_flags |= TUNNEL_VXLAN_OPT; - - if (gbp->dont_learn) - md->gbp |= VXLAN_GBP_DONT_LEARN; - - if (gbp->policy_applied) - md->gbp |= VXLAN_GBP_POLICY_APPLIED; - - flags &= ~VXLAN_GBP_USED_BITS; - } + if (vs->flags & VXLAN_F_REMCSUM_RX) + if (!vxlan_remcsum(&unparsed, skb, vs->flags)) + goto drop; + if (vs->flags & VXLAN_F_GBP) + vxlan_parse_gbp_hdr(&unparsed, md, tun_dst); - if (flags || vni & ~VXLAN_VNI_MASK) { + if (unparsed.vx_flags || unparsed.vx_vni) { /* If there are any unprocessed flags remaining treat * this as a malformed packet. This behavior diverges from * VXLAN RFC (RFC7348) which stipulates that bits in reserved @@ -1327,28 +1340,19 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb) * is more robust and provides a little more security in * adding extensions to VXLAN. 
*/ - - goto bad_flags; + goto drop; } - vxlan_rcv(vs, skb, md, vni >> 8, tun_dst); + vxlan_rcv(vxlan, vs, skb, md, tun_dst); return 0; drop: - /* Consume bad packet */ - kfree_skb(skb); - return 0; - -bad_flags: - netdev_dbg(skb->dev, "invalid vxlan flags=%#x vni=%#x\n", - ntohl(vxh->vx_flags), ntohl(vxh->vx_vni)); - -error: if (tun_dst) dst_release((struct dst_entry *)tun_dst); - /* Return non vxlan pkt */ - return 1; + /* Consume bad packet */ + kfree_skb(skb); + return 0; } static int arp_reduce(struct net_device *dev, struct sk_buff *skb) @@ -1673,7 +1677,7 @@ static void vxlan_build_gbp_hdr(struct vxlanhdr *vxh, u32 vxflags, return; gbp = (struct vxlanhdr_gbp *)vxh; - vxh->vx_flags |= htonl(VXLAN_HF_GBP); + vxh->vx_flags |= VXLAN_HF_GBP; if (md->gbp & VXLAN_GBP_DONT_LEARN) gbp->dont_learn = 1; @@ -1684,20 +1688,15 @@ static void vxlan_build_gbp_hdr(struct vxlanhdr *vxh, u32 vxflags, gbp->policy_id = htons(md->gbp & VXLAN_GBP_ID_MASK); } -#if IS_ENABLED(CONFIG_IPV6) -static int vxlan6_xmit_skb(struct dst_entry *dst, struct sock *sk, - struct sk_buff *skb, - struct net_device *dev, struct in6_addr *saddr, - struct in6_addr *daddr, __u8 prio, __u8 ttl, - __be16 src_port, __be16 dst_port, __be32 vni, - struct vxlan_metadata *md, bool xnet, u32 vxflags) +static int vxlan_build_skb(struct sk_buff *skb, struct dst_entry *dst, + int iphdr_len, __be32 vni, + struct vxlan_metadata *md, u32 vxflags, + bool udp_sum) { struct vxlanhdr *vxh; int min_headroom; int err; - bool udp_sum = !(vxflags & VXLAN_F_UDP_ZERO_CSUM6_TX); int type = udp_sum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL; - u16 hdrlen = sizeof(struct vxlanhdr); if ((vxflags & VXLAN_F_REMCSUM_TX) && skb->ip_summed == CHECKSUM_PARTIAL) { @@ -1706,50 +1705,39 @@ static int vxlan6_xmit_skb(struct dst_entry *dst, struct sock *sk, if (csum_start <= VXLAN_MAX_REMCSUM_START && !(csum_start & VXLAN_RCO_SHIFT_MASK) && (skb->csum_offset == offsetof(struct udphdr, check) || - skb->csum_offset == offsetof(struct tcphdr, check))) { - udp_sum = false; + skb->csum_offset == offsetof(struct tcphdr, check))) type |= SKB_GSO_TUNNEL_REMCSUM; - } } - skb_scrub_packet(skb, xnet); - min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len - + VXLAN_HLEN + sizeof(struct ipv6hdr) + + VXLAN_HLEN + iphdr_len + (skb_vlan_tag_present(skb) ? 
VLAN_HLEN : 0); /* Need space for new headers (invalidates iph ptr) */ err = skb_cow_head(skb, min_headroom); if (unlikely(err)) { kfree_skb(skb); - goto err; + return err; } skb = vlan_hwaccel_push_inside(skb); - if (WARN_ON(!skb)) { - err = -ENOMEM; - goto err; - } + if (WARN_ON(!skb)) + return -ENOMEM; - skb = iptunnel_handle_offloads(skb, udp_sum, type); - if (IS_ERR(skb)) { - err = -EINVAL; - goto err; - } + skb = iptunnel_handle_offloads(skb, type); + if (IS_ERR(skb)) + return PTR_ERR(skb); vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh)); - vxh->vx_flags = htonl(VXLAN_HF_VNI); - vxh->vx_vni = vni; + vxh->vx_flags = VXLAN_HF_VNI; + vxh->vx_vni = vxlan_vni_field(vni); if (type & SKB_GSO_TUNNEL_REMCSUM) { - u32 data = (skb_checksum_start_offset(skb) - hdrlen) >> - VXLAN_RCO_SHIFT; - - if (skb->csum_offset == offsetof(struct udphdr, check)) - data |= VXLAN_RCO_UDP; + unsigned int start; - vxh->vx_vni |= htonl(data); - vxh->vx_flags |= htonl(VXLAN_HF_RCO); + start = skb_checksum_start_offset(skb) - sizeof(struct vxlanhdr); + vxh->vx_vni |= vxlan_compute_rco(start, skb->csum_offset); + vxh->vx_flags |= VXLAN_HF_RCO; if (!skb_is_gso(skb)) { skb->ip_summed = CHECKSUM_NONE; @@ -1761,102 +1749,63 @@ static int vxlan6_xmit_skb(struct dst_entry *dst, struct sock *sk, vxlan_build_gbp_hdr(vxh, vxflags, md); skb_set_inner_protocol(skb, htons(ETH_P_TEB)); - - udp_tunnel6_xmit_skb(dst, sk, skb, dev, saddr, daddr, prio, - ttl, src_port, dst_port, - !!(vxflags & VXLAN_F_UDP_ZERO_CSUM6_TX)); return 0; -err: - dst_release(dst); - return err; } -#endif -static int vxlan_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb, - __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df, - __be16 src_port, __be16 dst_port, __be32 vni, - struct vxlan_metadata *md, bool xnet, u32 vxflags) +static struct rtable *vxlan_get_route(struct vxlan_dev *vxlan, + struct sk_buff *skb, int oif, u8 tos, + __be32 daddr, __be32 *saddr, + struct dst_cache *dst_cache, + struct ip_tunnel_info *info) { - struct vxlanhdr *vxh; - int min_headroom; - int err; - bool udp_sum = !!(vxflags & VXLAN_F_UDP_CSUM); - int type = udp_sum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL; - u16 hdrlen = sizeof(struct vxlanhdr); - - if ((vxflags & VXLAN_F_REMCSUM_TX) && - skb->ip_summed == CHECKSUM_PARTIAL) { - int csum_start = skb_checksum_start_offset(skb); - - if (csum_start <= VXLAN_MAX_REMCSUM_START && - !(csum_start & VXLAN_RCO_SHIFT_MASK) && - (skb->csum_offset == offsetof(struct udphdr, check) || - skb->csum_offset == offsetof(struct tcphdr, check))) { - udp_sum = false; - type |= SKB_GSO_TUNNEL_REMCSUM; - } - } - - min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len - + VXLAN_HLEN + sizeof(struct iphdr) - + (skb_vlan_tag_present(skb) ? 
VLAN_HLEN : 0); + struct rtable *rt = NULL; + bool use_cache = false; + struct flowi4 fl4; - /* Need space for new headers (invalidates iph ptr) */ - err = skb_cow_head(skb, min_headroom); - if (unlikely(err)) { - kfree_skb(skb); - return err; + /* when the ip_tunnel_info is available, the tos used for lookup is + * packet independent, so we can use the cache + */ + if (!skb->mark && (!tos || info)) { + use_cache = true; + rt = dst_cache_get_ip4(dst_cache, saddr); + if (rt) + return rt; } - skb = vlan_hwaccel_push_inside(skb); - if (WARN_ON(!skb)) - return -ENOMEM; - - skb = iptunnel_handle_offloads(skb, udp_sum, type); - if (IS_ERR(skb)) - return PTR_ERR(skb); - - vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh)); - vxh->vx_flags = htonl(VXLAN_HF_VNI); - vxh->vx_vni = vni; - - if (type & SKB_GSO_TUNNEL_REMCSUM) { - u32 data = (skb_checksum_start_offset(skb) - hdrlen) >> - VXLAN_RCO_SHIFT; - - if (skb->csum_offset == offsetof(struct udphdr, check)) - data |= VXLAN_RCO_UDP; + memset(&fl4, 0, sizeof(fl4)); + fl4.flowi4_oif = oif; + fl4.flowi4_tos = RT_TOS(tos); + fl4.flowi4_mark = skb->mark; + fl4.flowi4_proto = IPPROTO_UDP; + fl4.daddr = daddr; + fl4.saddr = vxlan->cfg.saddr.sin.sin_addr.s_addr; - vxh->vx_vni |= htonl(data); - vxh->vx_flags |= htonl(VXLAN_HF_RCO); + rt = ip_route_output_key(vxlan->net, &fl4); + if (!IS_ERR(rt)) { + *saddr = fl4.saddr; + if (use_cache) + dst_cache_set_ip4(dst_cache, &rt->dst, fl4.saddr); } - - if (!skb_is_gso(skb)) { - skb->ip_summed = CHECKSUM_NONE; - skb->encapsulation = 0; - } + return rt; } - - if (vxflags & VXLAN_F_GBP) - vxlan_build_gbp_hdr(vxh, vxflags, md); - - skb_set_inner_protocol(skb, htons(ETH_P_TEB)); - - udp_tunnel_xmit_skb(rt, sk, skb, src, dst, tos, ttl, df, - src_port, dst_port, xnet, - !(vxflags & VXLAN_F_UDP_CSUM)); - return 0; + return rt; } #if IS_ENABLED(CONFIG_IPV6) static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan, struct sk_buff *skb, int oif, const struct in6_addr *daddr, - struct in6_addr *saddr) + struct in6_addr *saddr, + struct dst_cache *dst_cache) { struct dst_entry *ndst; struct flowi6 fl6; int err; + if (!skb->mark) { + ndst = dst_cache_get_ip6(dst_cache, saddr); + if (ndst) + return ndst; + } + memset(&fl6, 0, sizeof(fl6)); fl6.flowi6_oif = oif; fl6.daddr = *daddr; @@ -1871,6 +1820,8 @@ static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan, return ERR_PTR(err); *saddr = fl6.saddr; + if (!skb->mark) + dst_cache_set_ip6(dst_cache, ndst, saddr); return ndst; } #endif @@ -1923,22 +1874,24 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan, static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, struct vxlan_rdst *rdst, bool did_rsc) { + struct dst_cache *dst_cache; struct ip_tunnel_info *info; struct vxlan_dev *vxlan = netdev_priv(dev); struct sock *sk; struct rtable *rt = NULL; const struct iphdr *old_iph; - struct flowi4 fl4; union vxlan_addr *dst; union vxlan_addr remote_ip; struct vxlan_metadata _md; struct vxlan_metadata *md = &_md; __be16 src_port = 0, dst_port; - u32 vni; + __be32 vni; __be16 df = 0; __u8 tos, ttl; int err; u32 flags = vxlan->flags; + bool udp_sum = false; + bool xnet = !net_eq(vxlan->net, dev_net(vxlan->dev)); info = skb_tunnel_info(skb); @@ -1946,6 +1899,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, dst_port = rdst->remote_port ?
rdst->remote_port : vxlan->cfg.dst_port; vni = rdst->remote_vni; dst = &rdst->remote_ip; + dst_cache = &rdst->dst_cache; } else { if (!info) { WARN_ONCE(1, "%s: Missing encapsulation instructions\n", @@ -1953,13 +1907,14 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, goto drop; } dst_port = info->key.tp_dst ? : vxlan->cfg.dst_port; - vni = be64_to_cpu(info->key.tun_id); + vni = vxlan_tun_id_to_vni(info->key.tun_id); remote_ip.sa.sa_family = ip_tunnel_info_af(info); if (remote_ip.sa.sa_family == AF_INET) remote_ip.sin.sin_addr.s_addr = info->key.u.ipv4.dst; else remote_ip.sin6.sin6_addr = info->key.u.ipv6.dst; dst = &remote_ip; + dst_cache = &info->dst_cache; } if (vxlan_addr_any(dst)) { @@ -1985,13 +1940,9 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, vxlan->cfg.port_max, true); if (info) { - if (info->key.tun_flags & TUNNEL_CSUM) - flags |= VXLAN_F_UDP_CSUM; - else - flags &= ~VXLAN_F_UDP_CSUM; - ttl = info->key.ttl; tos = info->key.tos; + udp_sum = !!(info->key.tun_flags & TUNNEL_CSUM); if (info->options_len) md = ip_tunnel_info_opts(info); @@ -2000,22 +1951,23 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, } if (dst->sa.sa_family == AF_INET) { + __be32 saddr; + if (!vxlan->vn4_sock) goto drop; sk = vxlan->vn4_sock->sock->sk; - if (info && (info->key.tun_flags & TUNNEL_DONT_FRAGMENT)) - df = htons(IP_DF); - - memset(&fl4, 0, sizeof(fl4)); - fl4.flowi4_oif = rdst ? rdst->remote_ifindex : 0; - fl4.flowi4_tos = RT_TOS(tos); - fl4.flowi4_mark = skb->mark; - fl4.flowi4_proto = IPPROTO_UDP; - fl4.daddr = dst->sin.sin_addr.s_addr; - fl4.saddr = vxlan->cfg.saddr.sin.sin_addr.s_addr; + if (info) { + if (info->key.tun_flags & TUNNEL_DONT_FRAGMENT) + df = htons(IP_DF); + } else { + udp_sum = !!(flags & VXLAN_F_UDP_CSUM); + } - rt = ip_route_output_key(vxlan->net, &fl4); + rt = vxlan_get_route(vxlan, skb, + rdst ? rdst->remote_ifindex : 0, tos, + dst->sin.sin_addr.s_addr, &saddr, + dst_cache, info); if (IS_ERR(rt)) { netdev_dbg(dev, "no route to %pI4\n", &dst->sin.sin_addr.s_addr); @@ -2047,16 +1999,14 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, tos = ip_tunnel_ecn_encap(tos, old_iph, skb); ttl = ttl ? : ip4_dst_hoplimit(&rt->dst); - err = vxlan_xmit_skb(rt, sk, skb, fl4.saddr, - dst->sin.sin_addr.s_addr, tos, ttl, df, - src_port, dst_port, htonl(vni << 8), md, - !net_eq(vxlan->net, dev_net(vxlan->dev)), - flags); - if (err < 0) { - /* skb is already freed. */ - skb = NULL; - goto rt_tx_error; - } + err = vxlan_build_skb(skb, &rt->dst, sizeof(struct iphdr), + vni, md, flags, udp_sum); + if (err < 0) + goto xmit_tx_error; + + udp_tunnel_xmit_skb(rt, sk, skb, saddr, + dst->sin.sin_addr.s_addr, tos, ttl, df, + src_port, dst_port, xnet, !udp_sum); #if IS_ENABLED(CONFIG_IPV6) } else { struct dst_entry *ndst; @@ -2069,7 +2019,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, ndst = vxlan6_get_route(vxlan, skb, rdst ? rdst->remote_ifindex : 0, - &dst->sin6.sin6_addr, &saddr); + &dst->sin6.sin6_addr, &saddr, + dst_cache); if (IS_ERR(ndst)) { netdev_dbg(dev, "no route to %pI6\n", &dst->sin6.sin6_addr); @@ -2101,11 +2052,20 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, return; } + if (!info) + udp_sum = !(flags & VXLAN_F_UDP_ZERO_CSUM6_TX); + ttl = ttl ? 
: ip6_dst_hoplimit(ndst); - err = vxlan6_xmit_skb(ndst, sk, skb, dev, &saddr, &dst->sin6.sin6_addr, - 0, ttl, src_port, dst_port, htonl(vni << 8), md, - !net_eq(vxlan->net, dev_net(vxlan->dev)), - flags); + skb_scrub_packet(skb, xnet); + err = vxlan_build_skb(skb, ndst, sizeof(struct ipv6hdr), + vni, md, flags, udp_sum); + if (err < 0) { + dst_release(ndst); + return; + } + udp_tunnel6_xmit_skb(ndst, sk, skb, dev, + &saddr, &dst->sin6.sin6_addr, + 0, ttl, src_port, dst_port, !udp_sum); #endif } @@ -2115,6 +2075,9 @@ drop: dev->stats.tx_dropped++; goto tx_free; +xmit_tx_error: + /* skb is already freed. */ + skb = NULL; rt_tx_error: ip_rt_put(rt); tx_error: @@ -2252,7 +2215,7 @@ static void vxlan_cleanup(unsigned long arg) static void vxlan_vs_add_dev(struct vxlan_sock *vs, struct vxlan_dev *vxlan) { struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id); - __u32 vni = vxlan->default_dst.remote_vni; + __be32 vni = vxlan->default_dst.remote_vni; spin_lock(&vn->sock_lock); hlist_add_head_rcu(&vxlan->hlist, vni_head(vs, vni)); @@ -2381,31 +2344,6 @@ static int vxlan_change_mtu(struct net_device *dev, int new_mtu) return 0; } -static int egress_ipv4_tun_info(struct net_device *dev, struct sk_buff *skb, - struct ip_tunnel_info *info, - __be16 sport, __be16 dport) -{ - struct vxlan_dev *vxlan = netdev_priv(dev); - struct rtable *rt; - struct flowi4 fl4; - - memset(&fl4, 0, sizeof(fl4)); - fl4.flowi4_tos = RT_TOS(info->key.tos); - fl4.flowi4_mark = skb->mark; - fl4.flowi4_proto = IPPROTO_UDP; - fl4.daddr = info->key.u.ipv4.dst; - - rt = ip_route_output_key(vxlan->net, &fl4); - if (IS_ERR(rt)) - return PTR_ERR(rt); - ip_rt_put(rt); - - info->key.u.ipv4.src = fl4.saddr; - info->key.tp_src = sport; - info->key.tp_dst = dport; - return 0; -} - static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb) { struct vxlan_dev *vxlan = netdev_priv(dev); @@ -2417,9 +2355,16 @@ static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb) dport = info->key.tp_dst ? : vxlan->cfg.dst_port; if (ip_tunnel_info_af(info) == AF_INET) { + struct rtable *rt; + if (!vxlan->vn4_sock) return -EINVAL; - return egress_ipv4_tun_info(dev, skb, info, sport, dport); + rt = vxlan_get_route(vxlan, skb, 0, info->key.tos, + info->key.u.ipv4.dst, + &info->key.u.ipv4.src, NULL, info); + if (IS_ERR(rt)) + return PTR_ERR(rt); + ip_rt_put(rt); } else { #if IS_ENABLED(CONFIG_IPV6) struct dst_entry *ndst; @@ -2428,17 +2373,16 @@ static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb) return -EINVAL; ndst = vxlan6_get_route(vxlan, skb, 0, &info->key.u.ipv6.dst, - &info->key.u.ipv6.src); + &info->key.u.ipv6.src, NULL); if (IS_ERR(ndst)) return PTR_ERR(ndst); dst_release(ndst); - - info->key.tp_src = sport; - info->key.tp_dst = dport; #else /* !CONFIG_IPV6 */ return -EPFNOSUPPORT; #endif } + info->key.tp_src = sport; + info->key.tp_dst = dport; return 0; } @@ -2750,7 +2694,7 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev, struct vxlan_config *conf) { struct vxlan_net *vn = net_generic(src_net, vxlan_net_id); - struct vxlan_dev *vxlan = netdev_priv(dev); + struct vxlan_dev *vxlan = netdev_priv(dev), *tmp; struct vxlan_rdst *dst = &vxlan->default_dst; unsigned short needed_headroom = ETH_HLEN; int err; @@ -2816,9 +2760,15 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev, if (!vxlan->cfg.age_interval) vxlan->cfg.age_interval = FDB_AGE_DEFAULT; - if (vxlan_find_vni(src_net, conf->vni, use_ipv6 ? 
AF_INET6 : AF_INET, - vxlan->cfg.dst_port, vxlan->flags)) + list_for_each_entry(tmp, &vn->vxlan_list, next) { + if (tmp->cfg.vni == conf->vni && + (tmp->default_dst.remote_ip.sa.sa_family == AF_INET6 || + tmp->cfg.saddr.sa.sa_family == AF_INET6) == use_ipv6 && + tmp->cfg.dst_port == vxlan->cfg.dst_port && + (tmp->flags & VXLAN_F_RCV_FLAGS) == + (vxlan->flags & VXLAN_F_RCV_FLAGS)) return -EEXIST; + } dev->ethtool_ops = &vxlan_ethtool_ops; @@ -2880,7 +2830,7 @@ static int vxlan_newlink(struct net *src_net, struct net_device *dev, memset(&conf, 0, sizeof(conf)); if (data[IFLA_VXLAN_ID]) - conf.vni = nla_get_u32(data[IFLA_VXLAN_ID]); + conf.vni = cpu_to_be32(nla_get_u32(data[IFLA_VXLAN_ID])); if (data[IFLA_VXLAN_GROUP]) { conf.remote_ip.sin.sin_addr.s_addr = nla_get_in_addr(data[IFLA_VXLAN_GROUP]); @@ -2984,7 +2934,7 @@ static int vxlan_newlink(struct net *src_net, struct net_device *dev, break; case -EEXIST: - pr_info("duplicate VNI %u\n", conf.vni); + pr_info("duplicate VNI %u\n", be32_to_cpu(conf.vni)); break; } @@ -3042,7 +2992,7 @@ static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev) .high = htons(vxlan->cfg.port_max), }; - if (nla_put_u32(skb, IFLA_VXLAN_ID, dst->remote_vni)) + if (nla_put_u32(skb, IFLA_VXLAN_ID, be32_to_cpu(dst->remote_vni))) goto nla_put_failure; if (!vxlan_addr_any(&dst->remote_ip)) { diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c index cd39025d2abf..1bc5e93d2a34 100644 --- a/drivers/net/wan/x25_asy.c +++ b/drivers/net/wan/x25_asy.c @@ -571,8 +571,10 @@ static int x25_asy_open_tty(struct tty_struct *tty) /* Perform the low-level X.25 async init */ err = x25_asy_open(sl->dev); - if (err) + if (err) { + x25_asy_free(sl); return err; + } /* Done. We have linked the TTY line to a channel. 
*/ return 0; } diff --git a/drivers/net/wireless/ath/ath10k/thermal.h b/drivers/net/wireless/ath/ath10k/thermal.h index b610ea5caae8..c9223e9e962f 100644 --- a/drivers/net/wireless/ath/ath10k/thermal.h +++ b/drivers/net/wireless/ath/ath10k/thermal.h @@ -36,7 +36,7 @@ struct ath10k_thermal { int temperature; }; -#ifdef CONFIG_THERMAL +#if IS_REACHABLE(CONFIG_THERMAL) int ath10k_thermal_register(struct ath10k *ar); void ath10k_thermal_unregister(struct ath10k *ar); void ath10k_thermal_event_temperature(struct ath10k *ar, int temperature); diff --git a/drivers/net/wireless/ath/ath9k/eeprom.c b/drivers/net/wireless/ath/ath9k/eeprom.c index a7afdeee698c..73fb4232f9f2 100644 --- a/drivers/net/wireless/ath/ath9k/eeprom.c +++ b/drivers/net/wireless/ath/ath9k/eeprom.c @@ -150,18 +150,18 @@ int ath9k_hw_nvram_swap_data(struct ath_hw *ah, bool *swap_needed, int size) return -EIO; } - if (magic == AR5416_EEPROM_MAGIC) { - *swap_needed = false; - } else if (swab16(magic) == AR5416_EEPROM_MAGIC) { + *swap_needed = false; + if (swab16(magic) == AR5416_EEPROM_MAGIC) { if (ah->ah_flags & AH_NO_EEP_SWAP) { ath_info(common, "Ignoring endianness difference in EEPROM magic bytes.\n"); - - *swap_needed = false; } else { *swap_needed = true; } - } else { + } else if (magic != AR5416_EEPROM_MAGIC) { + if (ath9k_hw_use_flash(ah)) + return 0; + ath_err(common, "Invalid EEPROM Magic (0x%04x).\n", magic); return -EINVAL; diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c index 98c9148a3450..3bbe73b6d05a 100644 --- a/drivers/net/wireless/ath/wil6210/debugfs.c +++ b/drivers/net/wireless/ath/wil6210/debugfs.c @@ -580,16 +580,10 @@ static ssize_t wil_write_file_rxon(struct file *file, const char __user *buf, long channel; bool on; - char *kbuf = kmalloc(len + 1, GFP_KERNEL); - - if (!kbuf) - return -ENOMEM; - if (copy_from_user(kbuf, buf, len)) { - kfree(kbuf); - return -EIO; - } + char *kbuf = memdup_user_nul(buf, len); - kbuf[len] = '\0'; + if (IS_ERR(kbuf)) + return PTR_ERR(kbuf); rc = kstrtol(kbuf, 0, &channel); kfree(kbuf); if (rc) diff --git a/drivers/net/wireless/broadcom/b43/main.c b/drivers/net/wireless/broadcom/b43/main.c index ec013fbd6a81..72380af9dc52 100644 --- a/drivers/net/wireless/broadcom/b43/main.c +++ b/drivers/net/wireless/broadcom/b43/main.c @@ -1215,10 +1215,10 @@ void b43_wireless_core_phy_pll_reset(struct b43_wldev *dev) case B43_BUS_BCMA: bcma_cc = &dev->dev->bdev->bus->drv_cc; - bcma_cc_write32(bcma_cc, BCMA_CC_CHIPCTL_ADDR, 0); - bcma_cc_mask32(bcma_cc, BCMA_CC_CHIPCTL_DATA, ~0x4); - bcma_cc_set32(bcma_cc, BCMA_CC_CHIPCTL_DATA, 0x4); - bcma_cc_mask32(bcma_cc, BCMA_CC_CHIPCTL_DATA, ~0x4); + bcma_cc_write32(bcma_cc, BCMA_CC_PMU_CHIPCTL_ADDR, 0); + bcma_cc_mask32(bcma_cc, BCMA_CC_PMU_CHIPCTL_DATA, ~0x4); + bcma_cc_set32(bcma_cc, BCMA_CC_PMU_CHIPCTL_DATA, 0x4); + bcma_cc_mask32(bcma_cc, BCMA_CC_PMU_CHIPCTL_DATA, ~0x4); break; #endif #ifdef CONFIG_B43_SSB @@ -4375,12 +4375,10 @@ redo: /* Synchronize and free the interrupt handlers. Unlock to avoid deadlocks. 
*/ orig_dev = dev; mutex_unlock(&wl->mutex); - if (b43_bus_host_is_sdio(dev->dev)) { + if (b43_bus_host_is_sdio(dev->dev)) b43_sdio_free_irq(dev); - } else { - synchronize_irq(dev->dev->irq); + else free_irq(dev->dev->irq, dev); - } mutex_lock(&wl->mutex); dev = wl->current_dev; if (!dev) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c index 53637399bb99..b98db8a0a069 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c @@ -879,11 +879,24 @@ int brcmf_sdiod_abort(struct brcmf_sdio_dev *sdiodev, uint fn) return 0; } -static void brcmf_sdiod_sgtable_alloc(struct brcmf_sdio_dev *sdiodev) +void brcmf_sdiod_sgtable_alloc(struct brcmf_sdio_dev *sdiodev) { + struct sdio_func *func; + struct mmc_host *host; + uint max_blocks; uint nents; int err; + func = sdiodev->func[2]; + host = func->card->host; + sdiodev->sg_support = host->max_segs > 1; + max_blocks = min_t(uint, host->max_blk_count, 511u); + sdiodev->max_request_size = min_t(uint, host->max_req_size, + max_blocks * func->cur_blksize); + sdiodev->max_segment_count = min_t(uint, host->max_segs, + SG_MAX_SINGLE_ALLOC); + sdiodev->max_segment_size = host->max_seg_size; + if (!sdiodev->sg_support) return; @@ -1021,9 +1034,6 @@ static void brcmf_sdiod_host_fixup(struct mmc_host *host) static int brcmf_sdiod_probe(struct brcmf_sdio_dev *sdiodev) { - struct sdio_func *func; - struct mmc_host *host; - uint max_blocks; int ret = 0; sdiodev->num_funcs = 2; @@ -1054,26 +1064,6 @@ static int brcmf_sdiod_probe(struct brcmf_sdio_dev *sdiodev) goto out; } - /* - * determine host related variables after brcmf_sdiod_probe() - * as func->cur_blksize is properly set and F2 init has been - * completed successfully. - */ - func = sdiodev->func[2]; - host = func->card->host; - sdiodev->sg_support = host->max_segs > 1; - max_blocks = min_t(uint, host->max_blk_count, 511u); - sdiodev->max_request_size = min_t(uint, host->max_req_size, - max_blocks * func->cur_blksize); - sdiodev->max_segment_count = min_t(uint, host->max_segs, - SG_MAX_SINGLE_ALLOC); - sdiodev->max_segment_size = host->max_seg_size; - - /* allocate scatter-gather table. sg support - * will be disabled upon allocation failure. 
- */ - brcmf_sdiod_sgtable_alloc(sdiodev); - ret = brcmf_sdiod_freezer_attach(sdiodev); if (ret) goto out; @@ -1084,7 +1074,7 @@ static int brcmf_sdiod_probe(struct brcmf_sdio_dev *sdiodev) ret = -ENODEV; goto out; } - brcmf_sdiod_host_fixup(host); + brcmf_sdiod_host_fixup(sdiodev->func[2]->card->host); out: if (ret) brcmf_sdiod_remove(sdiodev); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index 7b01e4ddb315..d00c5c1d58bf 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c @@ -247,7 +247,7 @@ static u16 chandef_to_chanspec(struct brcmu_d11inf *d11inf, brcmf_dbg(TRACE, "chandef: control %d center %d width %d\n", ch->chan->center_freq, ch->center_freq1, ch->width); ch_inf.chnum = ieee80211_frequency_to_channel(ch->center_freq1); - primary_offset = ch->center_freq1 - ch->chan->center_freq; + primary_offset = ch->chan->center_freq - ch->center_freq1; switch (ch->width) { case NL80211_CHAN_WIDTH_20: case NL80211_CHAN_WIDTH_20_NOHT: @@ -256,24 +256,21 @@ static u16 chandef_to_chanspec(struct brcmu_d11inf *d11inf, break; case NL80211_CHAN_WIDTH_40: ch_inf.bw = BRCMU_CHAN_BW_40; - if (primary_offset < 0) + if (primary_offset > 0) ch_inf.sb = BRCMU_CHAN_SB_U; else ch_inf.sb = BRCMU_CHAN_SB_L; break; case NL80211_CHAN_WIDTH_80: ch_inf.bw = BRCMU_CHAN_BW_80; - if (primary_offset < 0) { - if (primary_offset < -CH_10MHZ_APART) - ch_inf.sb = BRCMU_CHAN_SB_UU; - else - ch_inf.sb = BRCMU_CHAN_SB_UL; - } else { - if (primary_offset > CH_10MHZ_APART) - ch_inf.sb = BRCMU_CHAN_SB_LL; - else - ch_inf.sb = BRCMU_CHAN_SB_LU; - } + if (primary_offset == -30) + ch_inf.sb = BRCMU_CHAN_SB_LL; + else if (primary_offset == -10) + ch_inf.sb = BRCMU_CHAN_SB_LU; + else if (primary_offset == 10) + ch_inf.sb = BRCMU_CHAN_SB_UL; + else + ch_inf.sb = BRCMU_CHAN_SB_UU; break; case NL80211_CHAN_WIDTH_80P80: case NL80211_CHAN_WIDTH_160: diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c index 82e4382eb177..0e8f2a079907 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c @@ -803,7 +803,14 @@ static int brcmf_chip_dmp_get_regaddr(struct brcmf_chip_priv *ci, u32 *eromaddr, *eromaddr -= 4; return -EFAULT; } - } while (desc != DMP_DESC_ADDRESS); + } while (desc != DMP_DESC_ADDRESS && + desc != DMP_DESC_COMPONENT); + + /* stop if we crossed current component border */ + if (desc == DMP_DESC_COMPONENT) { + *eromaddr -= 4; + return 0; + } /* skip upper 32-bit address descriptor */ if (val & DMP_DESC_ADDRSIZE_GT32) @@ -876,7 +883,8 @@ int brcmf_chip_dmp_erom_scan(struct brcmf_chip_priv *ci) rev = (val & DMP_COMP_REVISION) >> DMP_COMP_REVISION_S; /* need core with ports */ - if (nmw + nsw == 0) + if (nmw + nsw == 0 && + id != BCMA_CORE_PMU) continue; /* try to obtain register address info */ @@ -1006,6 +1014,7 @@ static int brcmf_chip_setup(struct brcmf_chip_priv *chip) { struct brcmf_chip *pub; struct brcmf_core_priv *cc; + struct brcmf_core *pmu; u32 base; u32 val; int ret = 0; @@ -1017,11 +1026,15 @@ static int brcmf_chip_setup(struct brcmf_chip_priv *chip) /* get chipcommon capabilities */ pub->cc_caps = chip->ops->read32(chip->ctx, CORE_CC_REG(base, capabilities)); + pub->cc_caps_ext = chip->ops->read32(chip->ctx, + CORE_CC_REG(base, + capabilities_ext)); /* get pmu caps & rev */ pmu =
brcmf_chip_get_pmu(pub); /* after reading cc_caps_ext */ if (pub->cc_caps & CC_CAP_PMU) { val = chip->ops->read32(chip->ctx, - CORE_CC_REG(base, pmucapabilities)); + CORE_CC_REG(pmu->base, pmucapabilities)); pub->pmurev = val & PCAP_REV_MASK; pub->pmucaps = val; } @@ -1120,6 +1133,23 @@ struct brcmf_core *brcmf_chip_get_chipcommon(struct brcmf_chip *pub) return &cc->pub; } +struct brcmf_core *brcmf_chip_get_pmu(struct brcmf_chip *pub) +{ + struct brcmf_core *cc = brcmf_chip_get_chipcommon(pub); + struct brcmf_core *pmu; + + /* See if there is a separate PMU core available */ + if (cc->rev >= 35 && + pub->cc_caps_ext & BCMA_CC_CAP_EXT_AOB_PRESENT) { + pmu = brcmf_chip_get_core(pub, BCMA_CORE_PMU); + if (pmu) + return pmu; + } + + /* Fallback to ChipCommon core for older hardware */ + return cc; +} + bool brcmf_chip_iscoreup(struct brcmf_core *pub) { struct brcmf_core_priv *core; @@ -1290,6 +1320,7 @@ bool brcmf_chip_sr_capable(struct brcmf_chip *pub) { u32 base, addr, reg, pmu_cc3_mask = ~0; struct brcmf_chip_priv *chip; + struct brcmf_core *pmu = brcmf_chip_get_pmu(pub); brcmf_dbg(TRACE, "Enter\n"); @@ -1309,9 +1340,9 @@ bool brcmf_chip_sr_capable(struct brcmf_chip *pub) case BRCM_CC_4335_CHIP_ID: case BRCM_CC_4339_CHIP_ID: /* read PMU chipcontrol register 3 */ - addr = CORE_CC_REG(base, chipcontrol_addr); + addr = CORE_CC_REG(pmu->base, chipcontrol_addr); chip->ops->write32(chip->ctx, addr, 3); - addr = CORE_CC_REG(base, chipcontrol_data); + addr = CORE_CC_REG(pmu->base, chipcontrol_data); reg = chip->ops->read32(chip->ctx, addr); return (reg & pmu_cc3_mask) != 0; case BRCM_CC_43430_CHIP_ID: @@ -1319,12 +1350,12 @@ bool brcmf_chip_sr_capable(struct brcmf_chip *pub) reg = chip->ops->read32(chip->ctx, addr); return reg != 0; default: - addr = CORE_CC_REG(base, pmucapabilities_ext); + addr = CORE_CC_REG(pmu->base, pmucapabilities_ext); reg = chip->ops->read32(chip->ctx, addr); if ((reg & PCAPEXT_SR_SUPPORTED_MASK) == 0) return false; - addr = CORE_CC_REG(base, retention_ctl); + addr = CORE_CC_REG(pmu->base, retention_ctl); reg = chip->ops->read32(chip->ctx, addr); return (reg & (PMU_RCTL_MACPHY_DISABLE_MASK | PMU_RCTL_LOGIC_DISABLE_MASK)) == 0; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.h index f6b5feea23d2..dd0ec3eba6a9 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.h @@ -27,6 +27,7 @@ * @chip: chip identifier. * @chiprev: chip revision. * @cc_caps: chipcommon core capabilities. + * @cc_caps_ext: chipcommon core extended capabilities. * @pmucaps: PMU capabilities. * @pmurev: PMU revision. * @rambase: RAM base address (only applicable for ARM CR4 chips).
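The accessor added above gives every PMU register user the same shape: resolve the core once, then build addresses from pmu->base, so the code is identical whether the PMU is a discrete always-on core (rev >= 35 with the AOB capability bit) or still folded into ChipCommon. A condensed sketch of that calling pattern, reusing names from this patch; it assumes the chip.c context (e.g. container_of() into struct brcmf_chip_priv) and is not itself part of the change:

static u32 sketch_pmu_read_chipcontrol(struct brcmf_chip *pub, u32 regnum)
{
	/* Discrete PMU core when present, ChipCommon otherwise */
	struct brcmf_core *pmu = brcmf_chip_get_pmu(pub);
	struct brcmf_chip_priv *chip =
		container_of(pub, struct brcmf_chip_priv, pub);

	/* Indirect access: select the register, then read the data port */
	chip->ops->write32(chip->ctx,
			   CORE_CC_REG(pmu->base, chipcontrol_addr), regnum);
	return chip->ops->read32(chip->ctx,
				 CORE_CC_REG(pmu->base, chipcontrol_data));
}

This is the same substitution the brcmf_chip_sr_capable() hunk above makes at each call site: only pmu->base changes, the indirect addr/data access stays as it was.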
@@ -38,6 +39,7 @@ struct brcmf_chip { u32 chip; u32 chiprev; u32 cc_caps; + u32 cc_caps_ext; u32 pmucaps; u32 pmurev; u32 rambase; @@ -83,6 +85,7 @@ struct brcmf_chip *brcmf_chip_attach(void *ctx, void brcmf_chip_detach(struct brcmf_chip *chip); struct brcmf_core *brcmf_chip_get_core(struct brcmf_chip *chip, u16 coreid); struct brcmf_core *brcmf_chip_get_chipcommon(struct brcmf_chip *chip); +struct brcmf_core *brcmf_chip_get_pmu(struct brcmf_chip *pub); bool brcmf_chip_iscoreup(struct brcmf_core *core); void brcmf_chip_coredisable(struct brcmf_core *core, u32 prereset, u32 reset); void brcmf_chip_resetcore(struct brcmf_core *core, u32 prereset, u32 reset, diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c index 4265b50faa98..cfee477a6eb1 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c @@ -17,6 +17,7 @@ #include <linux/kernel.h> #include <linux/string.h> #include <linux/netdevice.h> +#include <linux/module.h> #include <brcmu_wifi.h> #include <brcmu_utils.h> #include "core.h" diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c index 1365c12b78fc..7269056d0044 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c @@ -93,7 +93,7 @@ static enum nvram_parser_state brcmf_nvram_handle_idle(struct nvram_parser *nvp) c = nvp->data[nvp->pos]; if (c == '\n') return COMMENT; - if (is_whitespace(c)) + if (is_whitespace(c) || c == '\0') goto proceed; if (c == '#') return COMMENT; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h index ef06f57a7a0e..d3c9f0d52ae3 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h @@ -47,8 +47,7 @@ static const char BRCM_ ## fw_nvram_name ## _FIRMWARE_NAME[] = \ BRCMF_FW_DEFAULT_PATH fw; \ static const char BRCM_ ## fw_nvram_name ## _NVRAM_NAME[] = \ BRCMF_FW_DEFAULT_PATH nvram; \ -MODULE_FIRMWARE(BRCMF_FW_DEFAULT_PATH fw); \ -MODULE_FIRMWARE(BRCMF_FW_DEFAULT_PATH nvram) +MODULE_FIRMWARE(BRCMF_FW_DEFAULT_PATH fw); #define BRCMF_FW_DEF(fw_name, fw) \ static const char BRCM_ ## fw_name ## _FIRMWARE_NAME[] = \ diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c index 2ca783fa50cf..7e269f9aa607 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c @@ -32,7 +32,7 @@ #define BRCMF_FLOWRING_LOW (BRCMF_FLOWRING_HIGH - 256) #define BRCMF_FLOWRING_INVALID_IFIDX 0xff -#define BRCMF_FLOWRING_HASH_AP(da, fifo, ifidx) (da[5] + fifo + ifidx * 16) +#define BRCMF_FLOWRING_HASH_AP(da, fifo, ifidx) (da[5] * 2 + fifo + ifidx * 16) #define BRCMF_FLOWRING_HASH_STA(fifo, ifidx) (fifo + ifidx * 16) static const u8 brcmf_flowring_prio2fifo[] = { @@ -68,7 +68,7 @@ u32 brcmf_flowring_lookup(struct brcmf_flowring *flow, u8 da[ETH_ALEN], u8 prio, u8 ifidx) { struct brcmf_flowring_hash *hash; - u8 hash_idx; + u16 hash_idx; u32 i; bool found; bool sta; @@ -88,6 +88,7 @@ u32 brcmf_flowring_lookup(struct brcmf_flowring *flow, u8 da[ETH_ALEN], } hash_idx = sta ? 
BRCMF_FLOWRING_HASH_STA(fifo, ifidx) : BRCMF_FLOWRING_HASH_AP(mac, fifo, ifidx); + hash_idx &= (BRCMF_FLOWRING_HASHSIZE - 1); found = false; hash = flow->hash; for (i = 0; i < BRCMF_FLOWRING_HASHSIZE; i++) { @@ -98,6 +99,7 @@ u32 brcmf_flowring_lookup(struct brcmf_flowring *flow, u8 da[ETH_ALEN], break; } hash_idx++; + hash_idx &= (BRCMF_FLOWRING_HASHSIZE - 1); } if (found) return hash[hash_idx].flowid; @@ -111,7 +113,7 @@ u32 brcmf_flowring_create(struct brcmf_flowring *flow, u8 da[ETH_ALEN], { struct brcmf_flowring_ring *ring; struct brcmf_flowring_hash *hash; - u8 hash_idx; + u16 hash_idx; u32 i; bool found; u8 fifo; @@ -131,6 +133,7 @@ u32 brcmf_flowring_create(struct brcmf_flowring *flow, u8 da[ETH_ALEN], } hash_idx = sta ? BRCMF_FLOWRING_HASH_STA(fifo, ifidx) : BRCMF_FLOWRING_HASH_AP(mac, fifo, ifidx); + hash_idx &= (BRCMF_FLOWRING_HASHSIZE - 1); found = false; hash = flow->hash; for (i = 0; i < BRCMF_FLOWRING_HASHSIZE; i++) { @@ -140,6 +143,7 @@ u32 brcmf_flowring_create(struct brcmf_flowring *flow, u8 da[ETH_ALEN], break; } hash_idx++; + hash_idx &= (BRCMF_FLOWRING_HASHSIZE - 1); } if (found) { for (i = 0; i < flow->nrofrings; i++) { @@ -169,7 +173,7 @@ u32 brcmf_flowring_create(struct brcmf_flowring *flow, u8 da[ETH_ALEN], } -u8 brcmf_flowring_tid(struct brcmf_flowring *flow, u8 flowid) +u8 brcmf_flowring_tid(struct brcmf_flowring *flow, u16 flowid) { struct brcmf_flowring_ring *ring; @@ -179,7 +183,7 @@ u8 brcmf_flowring_tid(struct brcmf_flowring *flow, u8 flowid) } -static void brcmf_flowring_block(struct brcmf_flowring *flow, u8 flowid, +static void brcmf_flowring_block(struct brcmf_flowring *flow, u16 flowid, bool blocked) { struct brcmf_flowring_ring *ring; @@ -228,10 +232,10 @@ static void brcmf_flowring_block(struct brcmf_flowring *flow, u8 flowid, } -void brcmf_flowring_delete(struct brcmf_flowring *flow, u8 flowid) +void brcmf_flowring_delete(struct brcmf_flowring *flow, u16 flowid) { struct brcmf_flowring_ring *ring; - u8 hash_idx; + u16 hash_idx; struct sk_buff *skb; ring = flow->rings[flowid]; @@ -253,7 +257,7 @@ void brcmf_flowring_delete(struct brcmf_flowring *flow, u8 flowid) } -u32 brcmf_flowring_enqueue(struct brcmf_flowring *flow, u8 flowid, +u32 brcmf_flowring_enqueue(struct brcmf_flowring *flow, u16 flowid, struct sk_buff *skb) { struct brcmf_flowring_ring *ring; @@ -279,7 +283,7 @@ u32 brcmf_flowring_enqueue(struct brcmf_flowring *flow, u8 flowid, } -struct sk_buff *brcmf_flowring_dequeue(struct brcmf_flowring *flow, u8 flowid) +struct sk_buff *brcmf_flowring_dequeue(struct brcmf_flowring *flow, u16 flowid) { struct brcmf_flowring_ring *ring; struct sk_buff *skb; @@ -300,7 +304,7 @@ struct sk_buff *brcmf_flowring_dequeue(struct brcmf_flowring *flow, u8 flowid) } -void brcmf_flowring_reinsert(struct brcmf_flowring *flow, u8 flowid, +void brcmf_flowring_reinsert(struct brcmf_flowring *flow, u16 flowid, struct sk_buff *skb) { struct brcmf_flowring_ring *ring; @@ -311,7 +315,7 @@ void brcmf_flowring_reinsert(struct brcmf_flowring *flow, u8 flowid, } -u32 brcmf_flowring_qlen(struct brcmf_flowring *flow, u8 flowid) +u32 brcmf_flowring_qlen(struct brcmf_flowring *flow, u16 flowid) { struct brcmf_flowring_ring *ring; @@ -326,7 +330,7 @@ u32 brcmf_flowring_qlen(struct brcmf_flowring *flow, u8 flowid) } -void brcmf_flowring_open(struct brcmf_flowring *flow, u8 flowid) +void brcmf_flowring_open(struct brcmf_flowring *flow, u16 flowid) { struct brcmf_flowring_ring *ring; @@ -340,10 +344,10 @@ void brcmf_flowring_open(struct brcmf_flowring *flow, u8 flowid) } -u8 
brcmf_flowring_ifidx_get(struct brcmf_flowring *flow, u8 flowid) +u8 brcmf_flowring_ifidx_get(struct brcmf_flowring *flow, u16 flowid) { struct brcmf_flowring_ring *ring; - u8 hash_idx; + u16 hash_idx; ring = flow->rings[flowid]; hash_idx = ring->hash_id; @@ -384,7 +388,7 @@ void brcmf_flowring_detach(struct brcmf_flowring *flow) struct brcmf_pub *drvr = bus_if->drvr; struct brcmf_flowring_tdls_entry *search; struct brcmf_flowring_tdls_entry *remove; - u8 flowid; + u16 flowid; for (flowid = 0; flowid < flow->nrofrings; flowid++) { if (flow->rings[flowid]) @@ -408,7 +412,7 @@ void brcmf_flowring_configure_addr_mode(struct brcmf_flowring *flow, int ifidx, struct brcmf_bus *bus_if = dev_get_drvdata(flow->dev); struct brcmf_pub *drvr = bus_if->drvr; u32 i; - u8 flowid; + u16 flowid; if (flow->addr_mode[ifidx] != addr_mode) { for (i = 0; i < ARRAY_SIZE(flow->hash); i++) { @@ -434,7 +438,7 @@ void brcmf_flowring_delete_peer(struct brcmf_flowring *flow, int ifidx, struct brcmf_flowring_tdls_entry *prev; struct brcmf_flowring_tdls_entry *search; u32 i; - u8 flowid; + u16 flowid; bool sta; sta = (flow->addr_mode[ifidx] == ADDR_INDIRECT); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.h index 95fd1c9675d1..068e68d94999 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.h @@ -16,7 +16,7 @@ #define BRCMFMAC_FLOWRING_H -#define BRCMF_FLOWRING_HASHSIZE 256 +#define BRCMF_FLOWRING_HASHSIZE 512 /* has to be 2^x */ #define BRCMF_FLOWRING_INVALID_ID 0xFFFFFFFF @@ -24,7 +24,7 @@ struct brcmf_flowring_hash { u8 mac[ETH_ALEN]; u8 fifo; u8 ifidx; - u8 flowid; + u16 flowid; }; enum ring_status { @@ -61,16 +61,16 @@ u32 brcmf_flowring_lookup(struct brcmf_flowring *flow, u8 da[ETH_ALEN], u8 prio, u8 ifidx); u32 brcmf_flowring_create(struct brcmf_flowring *flow, u8 da[ETH_ALEN], u8 prio, u8 ifidx); -void brcmf_flowring_delete(struct brcmf_flowring *flow, u8 flowid); -void brcmf_flowring_open(struct brcmf_flowring *flow, u8 flowid); -u8 brcmf_flowring_tid(struct brcmf_flowring *flow, u8 flowid); -u32 brcmf_flowring_enqueue(struct brcmf_flowring *flow, u8 flowid, +void brcmf_flowring_delete(struct brcmf_flowring *flow, u16 flowid); +void brcmf_flowring_open(struct brcmf_flowring *flow, u16 flowid); +u8 brcmf_flowring_tid(struct brcmf_flowring *flow, u16 flowid); +u32 brcmf_flowring_enqueue(struct brcmf_flowring *flow, u16 flowid, struct sk_buff *skb); -struct sk_buff *brcmf_flowring_dequeue(struct brcmf_flowring *flow, u8 flowid); -void brcmf_flowring_reinsert(struct brcmf_flowring *flow, u8 flowid, +struct sk_buff *brcmf_flowring_dequeue(struct brcmf_flowring *flow, u16 flowid); +void brcmf_flowring_reinsert(struct brcmf_flowring *flow, u16 flowid, struct sk_buff *skb); -u32 brcmf_flowring_qlen(struct brcmf_flowring *flow, u8 flowid); -u8 brcmf_flowring_ifidx_get(struct brcmf_flowring *flow, u8 flowid); +u32 brcmf_flowring_qlen(struct brcmf_flowring *flow, u16 flowid); +u8 brcmf_flowring_ifidx_get(struct brcmf_flowring *flow, u16 flowid); struct brcmf_flowring *brcmf_flowring_attach(struct device *dev, u16 nrofrings); void brcmf_flowring_detach(struct brcmf_flowring *flow); void brcmf_flowring_configure_addr_mode(struct brcmf_flowring *flow, int ifidx, diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c index c2bdb91746cf..922966734a7f 100644 --- 
a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c @@ -677,7 +677,7 @@ static u32 brcmf_msgbuf_flowring_create(struct brcmf_msgbuf *msgbuf, int ifidx, } -static void brcmf_msgbuf_txflow(struct brcmf_msgbuf *msgbuf, u8 flowid) +static void brcmf_msgbuf_txflow(struct brcmf_msgbuf *msgbuf, u16 flowid) { struct brcmf_flowring *flow = msgbuf->flow; struct brcmf_commonring *commonring; @@ -1310,7 +1310,7 @@ int brcmf_proto_msgbuf_rx_trigger(struct device *dev) } -void brcmf_msgbuf_delete_flowring(struct brcmf_pub *drvr, u8 flowid) +void brcmf_msgbuf_delete_flowring(struct brcmf_pub *drvr, u16 flowid) { struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd; struct msgbuf_tx_flowring_delete_req *delete; @@ -1415,6 +1415,13 @@ int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr) u32 count; if_msgbuf = drvr->bus_if->msgbuf; + + if (if_msgbuf->nrof_flowrings >= BRCMF_FLOWRING_HASHSIZE) { + brcmf_err("driver not configured for this many flowrings %d\n", + if_msgbuf->nrof_flowrings); + if_msgbuf->nrof_flowrings = BRCMF_FLOWRING_HASHSIZE - 1; + } + msgbuf = kzalloc(sizeof(*msgbuf), GFP_KERNEL); if (!msgbuf) goto fail; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.h index 3d513e407e3d..ee6906a3c3f6 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.h @@ -33,7 +33,7 @@ int brcmf_proto_msgbuf_rx_trigger(struct device *dev); -void brcmf_msgbuf_delete_flowring(struct brcmf_pub *drvr, u8 flowid); +void brcmf_msgbuf_delete_flowring(struct brcmf_pub *drvr, u16 flowid); int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr); void brcmf_proto_msgbuf_detach(struct brcmf_pub *drvr); #else diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c index 0480b70e3eb8..d5f9ef470447 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c @@ -1951,6 +1951,9 @@ static const struct dev_pm_ops brcmf_pciedrvr_pm = { #define BRCMF_PCIE_DEVICE(dev_id) { BRCM_PCIE_VENDOR_ID_BROADCOM, dev_id,\ PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_OTHER << 8, 0xffff00, 0 } +#define BRCMF_PCIE_DEVICE_SUB(dev_id, subvend, subdev) { \ + BRCM_PCIE_VENDOR_ID_BROADCOM, dev_id,\ + subvend, subdev, PCI_CLASS_NETWORK_OTHER << 8, 0xffff00, 0 } static struct pci_device_id brcmf_pcie_devid_table[] = { BRCMF_PCIE_DEVICE(BRCM_PCIE_4350_DEVICE_ID), @@ -1966,6 +1969,7 @@ static struct pci_device_id brcmf_pcie_devid_table[] = { BRCMF_PCIE_DEVICE(BRCM_PCIE_4365_DEVICE_ID), BRCMF_PCIE_DEVICE(BRCM_PCIE_4365_2G_DEVICE_ID), BRCMF_PCIE_DEVICE(BRCM_PCIE_4365_5G_DEVICE_ID), + BRCMF_PCIE_DEVICE_SUB(0x4365, BRCM_PCIE_VENDOR_ID_BROADCOM, 0x4365), BRCMF_PCIE_DEVICE(BRCM_PCIE_4366_DEVICE_ID), BRCMF_PCIE_DEVICE(BRCM_PCIE_4366_2G_DEVICE_ID), BRCMF_PCIE_DEVICE(BRCM_PCIE_4366_5G_DEVICE_ID), diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c index dd6614332836..c790fa89db05 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c @@ -45,8 +45,8 @@ #include "chip.h" #include "firmware.h" -#define DCMD_RESP_TIMEOUT msecs_to_jiffies(2000) -#define CTL_DONE_TIMEOUT msecs_to_jiffies(2000) +#define DCMD_RESP_TIMEOUT msecs_to_jiffies(2500) 
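Stepping back to the flowring rework above: widening flowid to u16 and growing the table only stays safe because BRCMF_FLOWRING_HASHSIZE remains a power of two (the header comment says as much) and every computed or advanced index is masked with HASHSIZE - 1. A minimal standalone sketch of that masked linear probe; the entry layout is simplified from struct brcmf_flowring_hash:

#define HASHSIZE 512u		/* has to be 2^x, mirrors the header */
#define HASHMASK (HASHSIZE - 1u)

struct entry {
	int in_use;
	unsigned char ifidx;
	unsigned short flowid;	/* widened to 16 bits, as in the patch */
};

/* Linear probing: start at the masked hash, walk at most one full
 * table, and wrap with the mask instead of a modulo.
 */
static int sketch_lookup(const struct entry *tbl, unsigned int hash,
			 unsigned char ifidx)
{
	unsigned int i, idx = hash & HASHMASK;

	for (i = 0; i < HASHSIZE; i++) {
		if (tbl[idx].in_use && tbl[idx].ifidx == ifidx)
			return tbl[idx].flowid;
		idx = (idx + 1) & HASHMASK;
	}
	return -1;	/* not found */
}

Without the mask, a hash such as BRCMF_FLOWRING_HASH_AP() (which can exceed 511 now that da[5] is doubled) could index past the table; masking makes any input safe and keeps the wraparound branch-free.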
+#define CTL_DONE_TIMEOUT msecs_to_jiffies(2500) #ifdef DEBUG @@ -3615,7 +3615,6 @@ brcmf_sdio_drivestrengthinit(struct brcmf_sdio_dev *sdiodev, const struct sdiod_drive_str *str_tab = NULL; u32 str_mask; u32 str_shift; - u32 base; u32 i; u32 drivestrength_sel = 0; u32 cc_data_temp; @@ -3658,14 +3657,15 @@ brcmf_sdio_drivestrengthinit(struct brcmf_sdio_dev *sdiodev, } if (str_tab != NULL) { + struct brcmf_core *pmu = brcmf_chip_get_pmu(ci); + for (i = 0; str_tab[i].strength != 0; i++) { if (drivestrength >= str_tab[i].strength) { drivestrength_sel = str_tab[i].sel; break; } } - base = brcmf_chip_get_chipcommon(ci)->base; - addr = CORE_CC_REG(base, chipcontrol_addr); + addr = CORE_CC_REG(pmu->base, chipcontrol_addr); brcmf_sdiod_regwl(sdiodev, addr, 1, NULL); cc_data_temp = brcmf_sdiod_regrl(sdiodev, addr, NULL); cc_data_temp &= ~str_mask; @@ -3835,8 +3835,7 @@ brcmf_sdio_probe_attach(struct brcmf_sdio *bus) goto fail; /* set PMUControl so a backplane reset does PMU state reload */ - reg_addr = CORE_CC_REG(brcmf_chip_get_chipcommon(bus->ci)->base, - pmucontrol); + reg_addr = CORE_CC_REG(brcmf_chip_get_pmu(bus->ci)->base, pmucontrol); reg_val = brcmf_sdiod_regrl(bus->sdiodev, reg_addr, &err); if (err) goto fail; @@ -4114,6 +4113,11 @@ struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev) goto fail; } + /* allocate scatter-gather table. sg support + * will be disabled upon allocation failure. + */ + brcmf_sdiod_sgtable_alloc(bus->sdiodev); + /* Query the F2 block size, set roundup accordingly */ bus->blocksize = bus->sdiodev->func[2]->cur_blksize; bus->roundup = min(max_roundup, bus->blocksize); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h index 5ec7a6d87672..23f223150cef 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h @@ -342,6 +342,7 @@ int brcmf_sdiod_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address, /* Issue an abort to the specified function */ int brcmf_sdiod_abort(struct brcmf_sdio_dev *sdiodev, uint fn); +void brcmf_sdiod_sgtable_alloc(struct brcmf_sdio_dev *sdiodev); void brcmf_sdiod_change_state(struct brcmf_sdio_dev *sdiodev, enum brcmf_sdiod_state state); #ifdef CONFIG_PM_SLEEP diff --git a/drivers/net/wireless/intel/iwlwifi/Kconfig b/drivers/net/wireless/intel/iwlwifi/Kconfig index 866067789330..11932d53ea24 100644 --- a/drivers/net/wireless/intel/iwlwifi/Kconfig +++ b/drivers/net/wireless/intel/iwlwifi/Kconfig @@ -99,6 +99,18 @@ config IWLWIFI_UAPSD If unsure, say N. +config IWLWIFI_PCIE_RTPM + bool "Enable runtime power management mode for PCIe devices" + depends on IWLMVM && PM + default n + help + Say Y here to enable runtime power management for PCIe + devices. If enabled, the device will go into low power mode + when idle for a short period of time, allowing for improved + power saving during runtime. + + If unsure, say N.
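For readers unfamiliar with what the new IWLWIFI_PCIE_RTPM option gates: runtime power management in the kernel is driven through the generic pm_runtime API rather than anything iwlwifi-specific. A hedged sketch of the usual PCIe driver pattern; the delay value and function placement are illustrative and not taken from this patch:

#include <linux/pm_runtime.h>

/* Probe side: opt the device in to autosuspend after a short idle. */
static void sketch_enable_rtpm(struct device *dev)
{
	pm_runtime_set_autosuspend_delay(dev, 2000);	/* ms, illustrative */
	pm_runtime_use_autosuspend(dev);
	pm_runtime_allow(dev);	/* permit runtime suspend on this device */
}

/* Around hardware access: hold a usage reference so the device
 * cannot suspend mid-operation, then drop it and rearm the timer.
 */
static int sketch_do_io(struct device *dev)
{
	int ret = pm_runtime_get_sync(dev);

	if (ret < 0) {
		pm_runtime_put_noidle(dev);
		return ret;
	}

	/* ... touch registers here ... */

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
	return 0;
}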
+ menu "Debugging Options" config IWLWIFI_DEBUG diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/calib.c b/drivers/net/wireless/intel/iwlwifi/dvm/calib.c index 07a4c644fb9b..e9cef9de9ed8 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/calib.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/calib.c @@ -901,7 +901,7 @@ static void iwlagn_gain_computation(struct iwl_priv *priv, /* bound gain by 2 bits value max, 3rd bit is sign */ data->delta_gain_code[i] = min(abs(delta_g), - (long) CHAIN_NOISE_MAX_DELTA_GAIN_CODE); + (s32) CHAIN_NOISE_MAX_DELTA_GAIN_CODE); if (delta_g < 0) /* diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/led.c b/drivers/net/wireless/intel/iwlwifi/dvm/led.c index 1aabb5ec096f..1bbd17ada974 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/led.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/led.c @@ -152,11 +152,14 @@ static void iwl_led_brightness_set(struct led_classdev *led_cdev, { struct iwl_priv *priv = container_of(led_cdev, struct iwl_priv, led); unsigned long on = 0; + unsigned long off = 0; if (brightness > 0) on = IWL_LED_SOLID; + else + off = IWL_LED_SOLID; - iwl_led_cmd(priv, on, 0); + iwl_led_cmd(priv, on, off); } static int iwl_led_blink_set(struct led_classdev *led_cdev, diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c index 29ea1c6705b4..4db4cb7aa73a 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c @@ -396,7 +396,7 @@ static int iwlagn_mac_suspend(struct ieee80211_hw *hw, iwl_write32(priv->trans, CSR_UCODE_DRV_GP1_SET, CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE); - iwl_trans_d3_suspend(priv->trans, false); + iwl_trans_d3_suspend(priv->trans, false, true); goto out; @@ -469,7 +469,7 @@ static int iwlagn_mac_resume(struct ieee80211_hw *hw) /* we'll clear ctx->vif during iwlagn_prepare_restart() */ vif = ctx->vif; - ret = iwl_trans_d3_resume(priv->trans, &d3_status, false); + ret = iwl_trans_d3_resume(priv->trans, &d3_status, false, true); if (ret) goto out_unlock; diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-7000.c b/drivers/net/wireless/intel/iwlwifi/iwl-7000.c index e60cf141ed79..fa41a5e1c890 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-7000.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-7000.c @@ -74,16 +74,19 @@ #define IWL7260_UCODE_API_MAX 17 #define IWL7265_UCODE_API_MAX 17 #define IWL7265D_UCODE_API_MAX 20 +#define IWL3168_UCODE_API_MAX 20 /* Oldest version we won't warn about */ #define IWL7260_UCODE_API_OK 13 #define IWL7265_UCODE_API_OK 13 #define IWL7265D_UCODE_API_OK 13 +#define IWL3168_UCODE_API_OK 20 /* Lowest firmware API version supported */ #define IWL7260_UCODE_API_MIN 13 #define IWL7265_UCODE_API_MIN 13 #define IWL7265D_UCODE_API_MIN 13 +#define IWL3168_UCODE_API_MIN 20 /* NVM versions */ #define IWL7260_NVM_VERSION 0x0a1d @@ -92,6 +95,8 @@ #define IWL3160_TX_POWER_VERSION 0xffff /* meaningless */ #define IWL3165_NVM_VERSION 0x709 #define IWL3165_TX_POWER_VERSION 0xffff /* meaningless */ +#define IWL3168_NVM_VERSION 0xd01 +#define IWL3168_TX_POWER_VERSION 0xffff /* meaningless */ #define IWL7265_NVM_VERSION 0x0a1d #define IWL7265_TX_POWER_VERSION 0xffff /* meaningless */ #define IWL7265D_NVM_VERSION 0x0c11 @@ -109,6 +114,9 @@ #define IWL3160_FW_PRE "iwlwifi-3160-" #define IWL3160_MODULE_FIRMWARE(api) IWL3160_FW_PRE __stringify(api) ".ucode" +#define IWL3168_FW_PRE "iwlwifi-3168-" +#define IWL3168_MODULE_FIRMWARE(api) IWL3168_FW_PRE __stringify(api) ".ucode" + #define IWL7265_FW_PRE 
"iwlwifi-7265-" #define IWL7265_MODULE_FIRMWARE(api) IWL7265_FW_PRE __stringify(api) ".ucode" @@ -180,6 +188,12 @@ static const struct iwl_ht_params iwl7000_ht_params = { .ucode_api_ok = IWL7265_UCODE_API_OK, \ .ucode_api_min = IWL7265_UCODE_API_MIN +#define IWL_DEVICE_3008 \ + IWL_DEVICE_7000_COMMON, \ + .ucode_api_max = IWL3168_UCODE_API_MAX, \ + .ucode_api_ok = IWL3168_UCODE_API_OK, \ + .ucode_api_min = IWL3168_UCODE_API_MIN + #define IWL_DEVICE_7005D \ IWL_DEVICE_7000_COMMON, \ .ucode_api_max = IWL7265D_UCODE_API_MAX, \ @@ -299,11 +313,11 @@ const struct iwl_cfg iwl3165_2ac_cfg = { const struct iwl_cfg iwl3168_2ac_cfg = { .name = "Intel(R) Dual Band Wireless AC 3168", - .fw_name_pre = IWL7265D_FW_PRE, - IWL_DEVICE_7000, + .fw_name_pre = IWL3168_FW_PRE, + IWL_DEVICE_3008, .ht_params = &iwl7000_ht_params, - .nvm_ver = IWL3165_NVM_VERSION, - .nvm_calib_ver = IWL3165_TX_POWER_VERSION, + .nvm_ver = IWL3168_NVM_VERSION, + .nvm_calib_ver = IWL3168_TX_POWER_VERSION, .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs, .dccm_len = IWL7265_DCCM_LEN, }; @@ -376,5 +390,6 @@ const struct iwl_cfg iwl7265d_n_cfg = { MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_OK)); MODULE_FIRMWARE(IWL3160_MODULE_FIRMWARE(IWL7260_UCODE_API_OK)); +MODULE_FIRMWARE(IWL3168_MODULE_FIRMWARE(IWL3168_UCODE_API_OK)); MODULE_FIRMWARE(IWL7265_MODULE_FIRMWARE(IWL7265_UCODE_API_OK)); MODULE_FIRMWARE(IWL7265D_MODULE_FIRMWARE(IWL7265D_UCODE_API_OK)); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-9000.c b/drivers/net/wireless/intel/iwlwifi/iwl-9000.c index ecbf4822cd69..4b93404f46a7 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-9000.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-9000.c @@ -138,7 +138,8 @@ static const struct iwl_tt_params iwl9000_tt_params = { .smem_offset = IWL9000_SMEM_OFFSET, \ .smem_len = IWL9000_SMEM_LEN, \ .thermal_params = &iwl9000_tt_params, \ - .apmg_not_supported = true + .apmg_not_supported = true, \ + .mq_rx_supported = true const struct iwl_cfg iwl9260_2ac_cfg = { .name = "Intel(R) Dual Band Wireless AC 9260", diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h index f99048135fb9..dad5570d6cc8 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h @@ -311,6 +311,7 @@ struct iwl_pwr_tx_backoff { * @dccm2_len: length of the second DCCM * @smem_offset: offset from which the SMEM begins * @smem_len: the length of SMEM + * @mq_rx_supported: multi-queue rx support * * We enable the driver to be backward compatible wrt. hardware features. * API differences in uCode shouldn't be handled here but through TLVs @@ -362,6 +363,7 @@ struct iwl_cfg { const u32 smem_len; const struct iwl_tt_params *thermal_params; bool apmg_not_supported; + bool mq_rx_supported; }; /* diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h index 5cc6be927eab..4ab6682ea53e 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2015 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2015 Intel Deutschland GmbH * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -312,6 +314,77 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(unsigned int chnl) #define FH_MEM_TFDIB_REG1_ADDR_BITSHIFT 28 #define FH_MEM_TB_MAX_LENGTH (0x00020000) +/* 9000 rx series registers */ + +#define RFH_Q0_FRBDCB_BA_LSB 0xA08000 /* 64 bit address */ +#define RFH_Q_FRBDCB_BA_LSB(q) (RFH_Q0_FRBDCB_BA_LSB + (q) * 8) +/* Write index table */ +#define RFH_Q0_FRBDCB_WIDX 0xA08080 +#define RFH_Q_FRBDCB_WIDX(q) (RFH_Q0_FRBDCB_WIDX + (q) * 4) +/* Read index table */ +#define RFH_Q0_FRBDCB_RIDX 0xA080C0 +#define RFH_Q_FRBDCB_RIDX(q) (RFH_Q0_FRBDCB_RIDX + (q) * 4) +/* Used list table */ +#define RFH_Q0_URBDCB_BA_LSB 0xA08100 /* 64 bit address */ +#define RFH_Q_URBDCB_BA_LSB(q) (RFH_Q0_URBDCB_BA_LSB + (q) * 8) +/* Write index table */ +#define RFH_Q0_URBDCB_WIDX 0xA08180 +#define RFH_Q_URBDCB_WIDX(q) (RFH_Q0_URBDCB_WIDX + (q) * 4) +#define RFH_Q0_URBDCB_VAID 0xA081C0 +#define RFH_Q_URBDCB_VAID(q) (RFH_Q0_URBDCB_VAID + (q) * 4) +/* stts */ +#define RFH_Q0_URBD_STTS_WPTR_LSB 0xA08200 /*64 bits address */ +#define RFH_Q_URBD_STTS_WPTR_LSB(q) (RFH_Q0_URBD_STTS_WPTR_LSB + (q) * 8) + +#define RFH_Q0_ORB_WPTR_LSB 0xA08280 +#define RFH_Q_ORB_WPTR_LSB(q) (RFH_Q0_ORB_WPTR_LSB + (q) * 8) +#define RFH_RBDBUF_RBD0_LSB 0xA08300 +#define RFH_RBDBUF_RBD_LSB(q) (RFH_RBDBUF_RBD0_LSB + (q) * 8) + +/* DMA configuration */ +#define RFH_RXF_DMA_CFG 0xA09820 +/* RB size */ +#define RFH_RXF_DMA_RB_SIZE_MASK (0x000F0000) /* bits 16-19 */ +#define RFH_RXF_DMA_RB_SIZE_POS 16 +#define RFH_RXF_DMA_RB_SIZE_1K (0x1 << RFH_RXF_DMA_RB_SIZE_POS) +#define RFH_RXF_DMA_RB_SIZE_2K (0x2 << RFH_RXF_DMA_RB_SIZE_POS) +#define RFH_RXF_DMA_RB_SIZE_4K (0x4 << RFH_RXF_DMA_RB_SIZE_POS) +#define RFH_RXF_DMA_RB_SIZE_8K (0x8 << RFH_RXF_DMA_RB_SIZE_POS) +#define RFH_RXF_DMA_RB_SIZE_12K (0x9 << RFH_RXF_DMA_RB_SIZE_POS) +#define RFH_RXF_DMA_RB_SIZE_16K (0xA << RFH_RXF_DMA_RB_SIZE_POS) +#define RFH_RXF_DMA_RB_SIZE_20K (0xB << RFH_RXF_DMA_RB_SIZE_POS) +#define RFH_RXF_DMA_RB_SIZE_24K (0xC << RFH_RXF_DMA_RB_SIZE_POS) +#define RFH_RXF_DMA_RB_SIZE_28K (0xD << RFH_RXF_DMA_RB_SIZE_POS) +#define RFH_RXF_DMA_RB_SIZE_32K (0xE << RFH_RXF_DMA_RB_SIZE_POS) +/* RB Circular Buffer size:defines the table sizes in RBD units */ +#define RFH_RXF_DMA_RBDCB_SIZE_MASK (0x00F00000) /* bits 20-23 */ +#define RFH_RXF_DMA_RBDCB_SIZE_POS 20 +#define RFH_RXF_DMA_RBDCB_SIZE_8 (0x3 << RFH_RXF_DMA_RBDCB_SIZE_POS) +#define RFH_RXF_DMA_RBDCB_SIZE_16 (0x4 << RFH_RXF_DMA_RBDCB_SIZE_POS) +#define RFH_RXF_DMA_RBDCB_SIZE_32 (0x5 << RFH_RXF_DMA_RBDCB_SIZE_POS) +#define RFH_RXF_DMA_RBDCB_SIZE_64 (0x7 << RFH_RXF_DMA_RBDCB_SIZE_POS) +#define RFH_RXF_DMA_RBDCB_SIZE_128 (0x7 << RFH_RXF_DMA_RBDCB_SIZE_POS) +#define RFH_RXF_DMA_RBDCB_SIZE_256 (0x8 << RFH_RXF_DMA_RBDCB_SIZE_POS) +#define RFH_RXF_DMA_RBDCB_SIZE_512 (0x9 << RFH_RXF_DMA_RBDCB_SIZE_POS) +#define RFH_RXF_DMA_RBDCB_SIZE_1024 (0xA << RFH_RXF_DMA_RBDCB_SIZE_POS) +#define RFH_RXF_DMA_RBDCB_SIZE_2048 (0xB << RFH_RXF_DMA_RBDCB_SIZE_POS) +#define RFH_RXF_DMA_MIN_RB_SIZE_MASK (0x03000000) /* bit 24-25 */ +#define RFH_RXF_DMA_MIN_RB_SIZE_POS 24 +#define RFH_RXF_DMA_MIN_RB_4_8 (3 << RFH_RXF_DMA_MIN_RB_SIZE_POS) +#define RFH_RXF_DMA_SINGLE_FRAME_MASK (0x20000000) /* bit 29 */ +#define RFH_DMA_EN_MASK (0xC0000000) /* bits 30-31*/ +#define RFH_DMA_EN_ENABLE_VAL BIT(31) + +#define RFH_RXF_RXQ_ACTIVE 0xA0980C + +#define RFH_GEN_CFG 0xA09800 +#define RFH_GEN_CFG_DEFAULT_RXQ_NUM_MASK 0xF00 +#define RFH_GEN_CFG_SERVICE_DMA_SNOOP BIT(0) +#define RFH_GEN_CFG_RFH_DMA_SNOOP 
BIT(1) +#define DEFAULT_RXQ_NUM 8 + +/* end of 9000 rx series registers */ + /* TFDB Area - TFDs buffer table */ #define FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK (0xFFFFFFFF) #define FH_TFDIB_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0x900) @@ -434,6 +507,10 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(unsigned int chnl) */ #define FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN (0x00000002) +#define MQ_RX_TABLE_SIZE 512 +#define MQ_RX_TABLE_MASK (MQ_RX_TABLE_SIZE - 1) +#define MQ_RX_POOL_SIZE MQ_RX_TABLE_MASK + #define RX_QUEUE_SIZE 256 #define RX_QUEUE_MASK 255 #define RX_QUEUE_SIZE_LOG 8 diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fw-error-dump.h b/drivers/net/wireless/intel/iwlwifi/iwl-fw-error-dump.h index a5aaf6853704..8425e1a587d9 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-fw-error-dump.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-fw-error-dump.h @@ -293,6 +293,8 @@ iwl_fw_error_next_data(struct iwl_fw_error_dump_data *data) * @FW_DBG_TX_LATENCY: trigger log collection when the tx latency goes above a * threshold. * @FW_DBG_TDLS: trigger log collection upon TDLS related events. + * @FW_DBG_TRIGGER_TX_STATUS: trigger log collection upon tx status when + * the firmware sends a tx reply. */ enum iwl_fw_dbg_trigger { FW_DBG_TRIGGER_INVALID = 0, @@ -309,6 +311,7 @@ enum iwl_fw_dbg_trigger { FW_DBG_TRIGGER_BA, FW_DBG_TRIGGER_TX_LATENCY, FW_DBG_TRIGGER_TDLS, + FW_DBG_TRIGGER_TX_STATUS, /* must be last */ FW_DBG_TRIGGER_MAX, diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h b/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h index 84f8aeb926c8..e2dbc67a367b 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h @@ -297,10 +297,12 @@ typedef unsigned int __bitwise__ iwl_ucode_tlv_capa_t; * @IWL_UCODE_TLV_CAPA_DQA_SUPPORT: supports dynamic queue allocation (DQA), * which also implies support for the scheduler configuration command * @IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH: supports TDLS channel switching + * @IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG: Consolidated D3-D0 image * @IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT: supports Hot Spot Command * @IWL_UCODE_TLV_CAPA_DC2DC_SUPPORT: supports DC2DC Command * @IWL_UCODE_TLV_CAPA_CSUM_SUPPORT: supports TCP Checksum Offload * @IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS: support radio and beacon statistics + * @IWL_UCODE_TLV_CAPA_P2P_STANDALONE_UAPSD: support p2p standalone U-APSD * @IWL_UCODE_TLV_CAPA_BT_COEX_PLCR: enabled BT Coex packet level co-running * @IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC: ucode supports LAR updates with different * sources for the MCC. This TLV bit is a future replacement to @@ -313,6 +315,8 @@ typedef unsigned int __bitwise__ iwl_ucode_tlv_capa_t; * @IWL_UCODE_TLV_CAPA_BT_MPLUT_SUPPORT: supports bt-coex Multi-priority LUT * @IWL_UCODE_TLV_CAPA_BEACON_ANT_SELECTION: firmware will decide on what * antenna the beacon should be transmitted + * @IWL_UCODE_TLV_CAPA_BEACON_STORING: firmware will store the latest beacon + * from AP and will send it upon d0i3 exit. 
* @IWL_UCODE_TLV_CAPA_LAR_SUPPORT_V2: support LAR API V2 * * @NUM_IWL_UCODE_TLV_CAPA: number of bits used @@ -330,10 +334,12 @@ enum iwl_ucode_tlv_capa { IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT = (__force iwl_ucode_tlv_capa_t)11, IWL_UCODE_TLV_CAPA_DQA_SUPPORT = (__force iwl_ucode_tlv_capa_t)12, IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH = (__force iwl_ucode_tlv_capa_t)13, + IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG = (__force iwl_ucode_tlv_capa_t)17, IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT = (__force iwl_ucode_tlv_capa_t)18, IWL_UCODE_TLV_CAPA_DC2DC_CONFIG_SUPPORT = (__force iwl_ucode_tlv_capa_t)19, IWL_UCODE_TLV_CAPA_CSUM_SUPPORT = (__force iwl_ucode_tlv_capa_t)21, IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS = (__force iwl_ucode_tlv_capa_t)22, + IWL_UCODE_TLV_CAPA_P2P_STANDALONE_UAPSD = (__force iwl_ucode_tlv_capa_t)26, IWL_UCODE_TLV_CAPA_BT_COEX_PLCR = (__force iwl_ucode_tlv_capa_t)28, IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC = (__force iwl_ucode_tlv_capa_t)29, IWL_UCODE_TLV_CAPA_BT_COEX_RRC = (__force iwl_ucode_tlv_capa_t)30, @@ -341,7 +347,9 @@ enum iwl_ucode_tlv_capa { IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE = (__force iwl_ucode_tlv_capa_t)64, IWL_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS = (__force iwl_ucode_tlv_capa_t)65, IWL_UCODE_TLV_CAPA_BT_MPLUT_SUPPORT = (__force iwl_ucode_tlv_capa_t)67, + IWL_UCODE_TLV_CAPA_MULTI_QUEUE_RX_SUPPORT = (__force iwl_ucode_tlv_capa_t)68, IWL_UCODE_TLV_CAPA_BEACON_ANT_SELECTION = (__force iwl_ucode_tlv_capa_t)71, + IWL_UCODE_TLV_CAPA_BEACON_STORING = (__force iwl_ucode_tlv_capa_t)72, IWL_UCODE_TLV_CAPA_LAR_SUPPORT_V2 = (__force iwl_ucode_tlv_capa_t)73, NUM_IWL_UCODE_TLV_CAPA @@ -748,6 +756,19 @@ struct iwl_fw_dbg_trigger_tdls { } __packed; /** + * struct iwl_fw_dbg_trigger_tx_status - configures trigger for tx response + * status. + * @statuses: the list of statuses to trigger the collection on + */ +struct iwl_fw_dbg_trigger_tx_status { + struct tx_status { + u8 status; + u8 reserved[3]; + } __packed statuses[16]; + __le32 reserved[2]; +} __packed; + +/** * struct iwl_fw_dbg_conf_tlv - a TLV that describes a debug configuration. 
* @id: conf id * @usniffer: should the uSniffer image be used diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h index fd42f63f5e84..b88ecc7892a9 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h @@ -108,6 +108,8 @@ enum iwl_amsdu_size { * @power_level: power level, default = 1 * @debug_level: levels are IWL_DL_* * @ant_coupling: antenna coupling in dB, default = 0 + * @nvm_file: specifies an external NVM file + * @uapsd_disable: disable U-APSD, default = 1 * @d0i3_disable: disable d0i3, default = 1, * @d0i3_entry_delay: time to wait after no refs are taken before * entering D0i3 (in msecs) diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c index 7b89bfc8c8ac..50f4cc60cf3e 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c @@ -539,7 +539,7 @@ static void iwl_set_hw_address_family_8000(struct device *dev, struct iwl_nvm_data *data, const __le16 *mac_override, const __le16 *nvm_hw, - u32 mac_addr0, u32 mac_addr1) + __le32 mac_addr0, __le32 mac_addr1) { const u8 *hw_addr; @@ -583,7 +583,8 @@ static void iwl_set_hw_address_family_8000(struct device *dev, if (!is_valid_ether_addr(data->hw_addr)) IWL_ERR_DEV(dev, - "mac address from hw section is not valid\n"); + "mac address (%pM) from hw section is not valid\n", + data->hw_addr); return; } @@ -597,7 +598,7 @@ iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg, const __le16 *nvm_calib, const __le16 *regulatory, const __le16 *mac_override, const __le16 *phy_sku, u8 tx_chains, u8 rx_chains, bool lar_fw_supported, - u32 mac_addr0, u32 mac_addr1) + __le32 mac_addr0, __le32 mac_addr1) { struct iwl_nvm_data *data; u32 sku; diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h index 92466ee72806..4e8e0dc474d4 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h @@ -79,7 +79,7 @@ iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg, const __le16 *nvm_calib, const __le16 *regulatory, const __le16 *mac_override, const __le16 *phy_sku, u8 tx_chains, u8 rx_chains, bool lar_fw_supported, - u32 mac_addr0, u32 mac_addr1); + __le32 mac_addr0, __le32 mac_addr1); /** * iwl_parse_mcc_info - parse MCC (mobile country code) info coming from FW diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h index 82fb3a97a46d..0ca0f13b69b0 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h @@ -506,7 +506,7 @@ struct iwl_trans_config { bool sw_csum_tx; const struct iwl_hcmd_arr *command_groups; int command_groups_size; - + u32 sdio_adma_addr; }; @@ -618,9 +618,9 @@ struct iwl_trans_ops { void (*fw_alive)(struct iwl_trans *trans, u32 scd_addr); void (*stop_device)(struct iwl_trans *trans, bool low_power); - void (*d3_suspend)(struct iwl_trans *trans, bool test); + void (*d3_suspend)(struct iwl_trans *trans, bool test, bool reset); int (*d3_resume)(struct iwl_trans *trans, enum iwl_d3_status *status, - bool test); + bool test, bool reset); int (*send_cmd)(struct iwl_trans *trans, struct iwl_host_cmd *cmd); @@ -736,6 +736,11 @@ enum iwl_plat_pm_mode { IWL_PLAT_PM_MODE_D0I3, }; +/* Max time to wait for trans to become idle/non-idle on d0i3 *
enter/exit (in msecs). + */ +#define IWL_TRANS_IDLE_TIMEOUT 2000 + /** * struct iwl_trans - transport common data * @@ -920,22 +925,23 @@ static inline void iwl_trans_stop_device(struct iwl_trans *trans) _iwl_trans_stop_device(trans, true); } -static inline void iwl_trans_d3_suspend(struct iwl_trans *trans, bool test) +static inline void iwl_trans_d3_suspend(struct iwl_trans *trans, bool test, + bool reset) { might_sleep(); if (trans->ops->d3_suspend) - trans->ops->d3_suspend(trans, test); + trans->ops->d3_suspend(trans, test, reset); } static inline int iwl_trans_d3_resume(struct iwl_trans *trans, enum iwl_d3_status *status, - bool test) + bool test, bool reset) { might_sleep(); if (!trans->ops->d3_resume) return 0; - return trans->ops->d3_resume(trans, status, test); + return trans->ops->d3_resume(trans, status, test, reset); } static inline void iwl_trans_ref(struct iwl_trans *trans) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h index b00c03fcd447..4b560e4417ee 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h @@ -6,7 +6,8 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2015 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -32,7 +33,8 @@ * BSD LICENSE * * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2015 Intel Deutschland GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -73,6 +75,7 @@ #define IWL_MVM_WOWLAN_PS_RX_DATA_TIMEOUT (10 * USEC_PER_MSEC) #define IWL_MVM_SHORT_PS_TX_DATA_TIMEOUT (2 * 1024) /* defined in TU */ #define IWL_MVM_SHORT_PS_RX_DATA_TIMEOUT (40 * 1024) /* defined in TU */ +#define IWL_MVM_P2P_UAPSD_STANDALONE 0 #define IWL_MVM_P2P_LOWLATENCY_PS_ENABLE 0 #define IWL_MVM_UAPSD_RX_DATA_TIMEOUT (50 * USEC_PER_MSEC) #define IWL_MVM_UAPSD_TX_DATA_TIMEOUT (50 * USEC_PER_MSEC) @@ -107,6 +110,7 @@ #define IWL_MVM_RS_80_20_FAR_RANGE_TWEAK 1 #define IWL_MVM_TOF_IS_RESPONDER 0 #define IWL_MVM_SW_TX_CSUM_OFFLOAD 0 +#define IWL_MVM_COLLECT_FW_ERR_DUMP 1 #define IWL_MVM_RS_NUM_TRY_BEFORE_ANT_TOGGLE 1 #define IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE 2 #define IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE_TW 1 diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c index d3e21d95cece..346376187ef8 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c @@ -7,6 +7,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -33,6 +34,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 Intel Deutschland GmbH * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -851,7 +853,8 @@ iwl_mvm_get_wowlan_config(struct iwl_mvm *mvm, wowlan_config_cmd->is_11n_connection = ap_sta->ht_cap.ht_supported; wowlan_config_cmd->flags = ENABLE_L3_FILTERING | - ENABLE_NBNS_FILTERING | ENABLE_DHCP_FILTERING; + ENABLE_NBNS_FILTERING | ENABLE_DHCP_FILTERING | + ENABLE_STORE_BEACON; /* Query the last used seqno and set it */ ret = iwl_mvm_get_last_nonqos_seq(mvm, vif); @@ -1023,14 +1026,18 @@ iwl_mvm_wowlan_config(struct iwl_mvm *mvm, struct ieee80211_sta *ap_sta) { int ret; + bool unified_image = fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG); - ret = iwl_mvm_switch_to_d3(mvm); - if (ret) - return ret; + if (!unified_image) { + ret = iwl_mvm_switch_to_d3(mvm); + if (ret) + return ret; - ret = iwl_mvm_d3_reprogram(mvm, vif, ap_sta); - if (ret) - return ret; + ret = iwl_mvm_d3_reprogram(mvm, vif, ap_sta); + if (ret) + return ret; + } if (!iwlwifi_mod_params.sw_crypto) { /* @@ -1072,10 +1079,14 @@ iwl_mvm_netdetect_config(struct iwl_mvm *mvm, { struct iwl_wowlan_config_cmd wowlan_config_cmd = {}; int ret; + bool unified_image = fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG); - ret = iwl_mvm_switch_to_d3(mvm); - if (ret) - return ret; + if (!unified_image) { + ret = iwl_mvm_switch_to_d3(mvm); + if (ret) + return ret; + } /* rfkill release can be either for wowlan or netdetect */ if (wowlan->rfkill_release) @@ -1151,6 +1162,8 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw, }; int ret; int len __maybe_unused; + bool unified_image = fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG); if (!wowlan) { /* @@ -1236,7 +1249,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw, clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status); - iwl_trans_d3_suspend(mvm->trans, test); + iwl_trans_d3_suspend(mvm->trans, test, !unified_image); out: if (ret < 0) { iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN); @@ -1299,7 +1312,7 @@ int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan) __set_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags); mutex_unlock(&mvm->d0i3_suspend_mutex); - iwl_trans_d3_suspend(trans, false); + iwl_trans_d3_suspend(trans, false, false); return 0; } @@ -2041,9 +2054,14 @@ static void iwl_mvm_d3_disconnect_iter(void *data, u8 *mac, static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test) { struct ieee80211_vif *vif = NULL; - int ret; + int ret = 1; enum iwl_d3_status d3_status; bool keep = false; + bool unified_image = fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG); + + u32 flags = CMD_ASYNC | CMD_HIGH_PRIO | CMD_SEND_IN_IDLE | + CMD_WAKE_UP_TRANS; mutex_lock(&mvm->mutex); @@ -2052,7 +2070,7 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test) if (IS_ERR_OR_NULL(vif)) goto err; - ret = iwl_trans_d3_resume(mvm->trans, &d3_status, test); + ret = iwl_trans_d3_resume(mvm->trans, &d3_status, test, !unified_image); if (ret) goto err; @@ -2095,17 +2113,28 @@ out_iterate: iwl_mvm_d3_disconnect_iter, keep ? vif : NULL); out: - /* return 1 to reconfigure the device */ + if (unified_image && !ret) { + ret = iwl_mvm_send_cmd_pdu(mvm, D0I3_END_CMD, flags, 0, NULL); + if (!ret) /* D3 ended successfully - no need to reset device */ + return 0; + } + + /* + * Reconfigure the device in one of the following cases: + * 1. We are not using a unified image + * 2. 
We are using a unified image but had an error while exiting D3 + */ set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status); set_bit(IWL_MVM_STATUS_D3_RECONFIG, &mvm->status); - - /* We always return 1, which causes mac80211 to do a reconfig - * with IEEE80211_RECONFIG_TYPE_RESTART. This type of - * reconfig calls iwl_mvm_restart_complete(), where we unref - * the IWL_MVM_REF_UCODE_DOWN, so we need to take the - * reference here. + /* + * When switching images we return 1, which causes mac80211 + * to do a reconfig with IEEE80211_RECONFIG_TYPE_RESTART. + * This type of reconfig calls iwl_mvm_restart_complete(), + * where we unref the IWL_MVM_REF_UCODE_DOWN, so we need + * to take the reference here. */ iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN); + return 1; } @@ -2122,7 +2151,7 @@ static int iwl_mvm_resume_d0i3(struct iwl_mvm *mvm) enum iwl_d3_status d3_status; struct iwl_trans *trans = mvm->trans; - iwl_trans_d3_resume(trans, &d3_status, false); + iwl_trans_d3_resume(trans, &d3_status, false, false); /* * make sure to clear D0I3_DEFER_WAKEUP before diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c index 9e0d46368cdd..14004456bf55 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c @@ -7,6 +7,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -33,6 +34,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 Intel Deutschland GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -1255,6 +1257,7 @@ static ssize_t iwl_dbgfs_low_latency_write(struct ieee80211_vif *vif, char *buf, { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm *mvm = mvmvif->mvm; + bool prev; u8 value; int ret; @@ -1265,7 +1268,9 @@ static ssize_t iwl_dbgfs_low_latency_write(struct ieee80211_vif *vif, char *buf, return -EINVAL; mutex_lock(&mvm->mutex); - iwl_mvm_update_low_latency(mvm, vif, value); + prev = iwl_mvm_vif_low_latency(mvmvif); + mvmvif->low_latency_dbgfs = value; + iwl_mvm_update_low_latency(mvm, vif, prev); mutex_unlock(&mvm->mutex); return count; @@ -1277,11 +1282,15 @@ static ssize_t iwl_dbgfs_low_latency_read(struct file *file, { struct ieee80211_vif *vif = file->private_data; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - char buf[2]; + char buf[30] = {}; + int len; - buf[0] = mvmvif->low_latency ? 
'1' : '0'; - buf[1] = '\n'; - return simple_read_from_buffer(user_buf, count, ppos, buf, sizeof(buf)); + len = snprintf(buf, sizeof(buf) - 1, + "traffic=%d\ndbgfs=%d\nvcmd=%d\n", + mvmvif->low_latency_traffic, + mvmvif->low_latency_dbgfs, + mvmvif->low_latency_vcmd); + return simple_read_from_buffer(user_buf, count, ppos, buf, len); } static ssize_t iwl_dbgfs_uapsd_misbehaving_read(struct file *file, @@ -1363,6 +1372,59 @@ static ssize_t iwl_dbgfs_rx_phyinfo_read(struct file *file, return simple_read_from_buffer(user_buf, count, ppos, buf, sizeof(buf)); } +static void iwl_dbgfs_quota_check(void *data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + int *ret = data; + + if (mvmvif->dbgfs_quota_min) + *ret = -EINVAL; +} + +static ssize_t iwl_dbgfs_quota_min_write(struct ieee80211_vif *vif, char *buf, + size_t count, loff_t *ppos) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm *mvm = mvmvif->mvm; + u16 value; + int ret; + + ret = kstrtou16(buf, 0, &value); + if (ret) + return ret; + + if (value > 95) + return -EINVAL; + + mutex_lock(&mvm->mutex); + + mvmvif->dbgfs_quota_min = 0; + ieee80211_iterate_interfaces(mvm->hw, IEEE80211_IFACE_ITER_NORMAL, + iwl_dbgfs_quota_check, &ret); + if (ret == 0) { + mvmvif->dbgfs_quota_min = value; + iwl_mvm_update_quotas(mvm, false, NULL); + } + mutex_unlock(&mvm->mutex); + + return ret ?: count; +} + +static ssize_t iwl_dbgfs_quota_min_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ieee80211_vif *vif = file->private_data; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + char buf[10]; + int len; + + len = snprintf(buf, sizeof(buf), "%d\n", mvmvif->dbgfs_quota_min); + + return simple_read_from_buffer(user_buf, count, ppos, buf, len); +} + #define MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \ _MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct ieee80211_vif) #define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \ @@ -1386,6 +1448,7 @@ MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_range_req_ext, 32); MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_range_abort, 32); MVM_DEBUGFS_READ_FILE_OPS(tof_range_response); MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_responder_params, 32); +MVM_DEBUGFS_READ_WRITE_FILE_OPS(quota_min, 32); void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { @@ -1423,6 +1486,8 @@ void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif) S_IRUSR | S_IWUSR); MVM_DEBUGFS_ADD_FILE_VIF(rx_phyinfo, mvmvif->dbgfs_dir, S_IRUSR | S_IWUSR); + MVM_DEBUGFS_ADD_FILE_VIF(quota_min, mvmvif->dbgfs_dir, + S_IRUSR | S_IWUSR); if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p && mvmvif == mvm->bf_allowed_vif) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c index 90500e2d107b..c529e5355803 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c @@ -7,6 +7,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -261,17 +262,18 @@ static ssize_t iwl_dbgfs_nic_temp_read(struct file *file, { struct iwl_mvm *mvm = file->private_data; char buf[16]; - int pos, temp; + int pos, ret; + s32 temp; if (!mvm->ucode_loaded) return -EIO; mutex_lock(&mvm->mutex); - temp = iwl_mvm_get_temp(mvm); + ret = iwl_mvm_get_temp(mvm, &temp); mutex_unlock(&mvm->mutex); - if (temp < 0) - return temp; + if (ret) + return -EIO; pos = scnprintf(buf , sizeof(buf), "%d\n", temp); @@ -942,6 +944,47 @@ iwl_dbgfs_scan_ant_rxchain_write(struct iwl_mvm *mvm, char *buf, return count; } +static ssize_t iwl_dbgfs_indirection_tbl_write(struct iwl_mvm *mvm, + char *buf, size_t count, + loff_t *ppos) +{ + struct iwl_rss_config_cmd cmd = { + .flags = cpu_to_le32(IWL_RSS_ENABLE), + .hash_mask = IWL_RSS_HASH_TYPE_IPV4_TCP | + IWL_RSS_HASH_TYPE_IPV4_PAYLOAD | + IWL_RSS_HASH_TYPE_IPV6_TCP | + IWL_RSS_HASH_TYPE_IPV6_PAYLOAD, + }; + int ret, i, num_repeats, nbytes = count / 2; + + ret = hex2bin(cmd.indirection_table, buf, nbytes); + if (ret) + return ret; + + /* + * The input is the redirection table, partial or full. + * Repeat the pattern if needed. + * For example, input of 01020F will be repeated 42 times, + * indirecting RSS hash results to queues 1, 2, 15 (skipping + * queues 3 - 14). + */ + num_repeats = ARRAY_SIZE(cmd.indirection_table) / nbytes; + for (i = 1; i < num_repeats; i++) + memcpy(&cmd.indirection_table[i * nbytes], + cmd.indirection_table, nbytes); + /* handle cut in the middle pattern for the last places */ + memcpy(&cmd.indirection_table[i * nbytes], cmd.indirection_table, + ARRAY_SIZE(cmd.indirection_table) % nbytes); + + memcpy(cmd.secret_key, mvm->secret_key, ARRAY_SIZE(cmd.secret_key)); + + mutex_lock(&mvm->mutex); + ret = iwl_mvm_send_cmd_pdu(mvm, RSS_CONFIG_CMD, 0, sizeof(cmd), &cmd); + mutex_unlock(&mvm->mutex); + + return ret ?: count; +} + static ssize_t iwl_dbgfs_fw_dbg_conf_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) @@ -983,7 +1026,7 @@ static ssize_t iwl_dbgfs_cont_recording_write(struct iwl_mvm *mvm, trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) return -EOPNOTSUPP; - ret = kstrtouint(buf, 0, &rec_mode); + ret = kstrtoint(buf, 0, &rec_mode); if (ret) return ret; @@ -1454,6 +1497,7 @@ MVM_DEBUGFS_READ_WRITE_FILE_OPS(d0i3_refs, 8); MVM_DEBUGFS_READ_WRITE_FILE_OPS(fw_dbg_conf, 8); MVM_DEBUGFS_WRITE_FILE_OPS(fw_dbg_collect, 64); MVM_DEBUGFS_WRITE_FILE_OPS(cont_recording, 8); +MVM_DEBUGFS_WRITE_FILE_OPS(indirection_tbl, 16); #ifdef CONFIG_IWLWIFI_BCAST_FILTERING MVM_DEBUGFS_READ_WRITE_FILE_OPS(bcast_filters, 256); @@ -1498,11 +1542,15 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir) MVM_DEBUGFS_ADD_FILE(fw_dbg_collect, mvm->debugfs_dir, S_IWUSR); MVM_DEBUGFS_ADD_FILE(send_echo_cmd, mvm->debugfs_dir, S_IWUSR); MVM_DEBUGFS_ADD_FILE(cont_recording, mvm->debugfs_dir, S_IWUSR); + MVM_DEBUGFS_ADD_FILE(indirection_tbl, mvm->debugfs_dir, S_IWUSR); if (!debugfs_create_bool("enable_scan_iteration_notif", S_IRUSR | S_IWUSR, mvm->debugfs_dir, &mvm->scan_iter_notif_enabled)) goto err; + if (!debugfs_create_bool("drop_bcn_ap_mode", S_IRUSR | S_IWUSR, + mvm->debugfs_dir, &mvm->drop_bcn_ap_mode)) + goto err; #ifdef CONFIG_IWLWIFI_BCAST_FILTERING if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING) { diff 
--git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-d3.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-d3.h index 62b9a0a96700..eec52c57f718 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-d3.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-d3.h @@ -251,6 +251,7 @@ enum iwl_wowlan_flags { ENABLE_L3_FILTERING = BIT(1), ENABLE_NBNS_FILTERING = BIT(2), ENABLE_DHCP_FILTERING = BIT(3), + ENABLE_STORE_BEACON = BIT(4), }; struct iwl_wowlan_config_cmd { diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h index fb6d341d6f3d..df939f51d9b9 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h @@ -7,7 +7,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2015 Intel Deutschland GmbH + * Copyright(c) 2015 - 2016 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -34,7 +34,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2015 Intel Deutschland GmbH + * Copyright(c) 2015 - 2016 Intel Deutschland GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -287,16 +287,13 @@ enum iwl_rx_mpdu_status { IWL_RX_MPDU_STATUS_KEY_ERROR = BIT(4), IWL_RX_MPDU_STATUS_ICV_OK = BIT(5), IWL_RX_MPDU_STATUS_MIC_OK = BIT(6), - /* TODO - verify this is the correct value */ IWL_RX_MPDU_RES_STATUS_TTAK_OK = BIT(7), IWL_RX_MPDU_STATUS_SEC_MASK = 0x7 << 8, IWL_RX_MPDU_STATUS_SEC_NONE = 0x0 << 8, IWL_RX_MPDU_STATUS_SEC_WEP = 0x1 << 8, IWL_RX_MPDU_STATUS_SEC_CCM = 0x2 << 8, IWL_RX_MPDU_STATUS_SEC_TKIP = 0x3 << 8, - /* TODO - define IWL_RX_MPDU_STATUS_SEC_EXT_ENC - this is a stub */ IWL_RX_MPDU_STATUS_SEC_EXT_ENC = 0x4 << 8, - /* TODO - define IWL_RX_MPDU_STATUS_SEC_GCM - this is a stub */ IWL_RX_MPDU_STATUS_SEC_GCM = 0x5 << 8, IWL_RX_MPDU_STATUS_DECRYPTED = BIT(11), IWL_RX_MPDU_STATUS_WEP_MATCH = BIT(12), @@ -350,11 +347,11 @@ struct iwl_rx_mpdu_desc { /* DW8 */ __le32 filter_match; /* DW9 */ - __le32 gp2_on_air_rise; - /* DW10 */ __le32 rate_n_flags; + /* DW10 */ + u8 energy_a, energy_b, channel, reserved; /* DW11 */ - u8 energy_a, energy_b, energy_c, channel; + __le32 gp2_on_air_rise; /* DW12 & DW13 */ __le64 tsf_on_air_rise; } __packed; @@ -365,4 +362,33 @@ struct iwl_frame_release { __le16 nssn; }; +enum iwl_rss_hash_func_en { + IWL_RSS_HASH_TYPE_IPV4_TCP, + IWL_RSS_HASH_TYPE_IPV4_UDP, + IWL_RSS_HASH_TYPE_IPV4_PAYLOAD, + IWL_RSS_HASH_TYPE_IPV6_TCP, + IWL_RSS_HASH_TYPE_IPV6_UDP, + IWL_RSS_HASH_TYPE_IPV6_PAYLOAD, +}; + +#define IWL_RSS_HASH_KEY_CNT 10 +#define IWL_RSS_INDIRECTION_TABLE_SIZE 128 +#define IWL_RSS_ENABLE 1 + +/** + * struct iwl_rss_config_cmd - RSS (Receive Side Scaling) configuration + * + * @flags: 1 - enable, 0 - disable + * @hash_mask: Type of RSS to use. 
Values are from %iwl_rss_hash_func_en + * @secret_key: 320 bit input of random key configuration from driver + * @indirection_table: indirection table + */ +struct iwl_rss_config_cmd { + __le32 flags; + u8 hash_mask; + u8 reserved[3]; + __le32 secret_key[IWL_RSS_HASH_KEY_CNT]; + u8 indirection_table[IWL_RSS_INDIRECTION_TABLE_SIZE]; +} __packed; /* RSS_CONFIG_CMD_API_S_VER_1 */ + #endif /* __fw_api_rx_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h index 6fca4fb1d306..90d911394836 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h @@ -7,6 +7,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2016 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -33,6 +34,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2016 Intel Deutschland GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -253,6 +255,68 @@ struct iwl_mvm_keyinfo { __le64 hw_tkip_mic_tx_key; } __packed; +#define IWL_ADD_STA_STATUS_MASK 0xFF +#define IWL_ADD_STA_BAID_MASK 0xFF00 + +/** + * struct iwl_mvm_add_sta_cmd_v7 - Add/modify a station in the fw's sta table. + * ( REPLY_ADD_STA = 0x18 ) + * @add_modify: 1: modify existing, 0: add new station + * @awake_acs: + * @tid_disable_tx: is tid BIT(tid) enabled for Tx. Clear BIT(x) to enable + * AMPDU for tid x. Set %STA_MODIFY_TID_DISABLE_TX to change this field. + * @mac_id_n_color: the Mac context this station belongs to + * @addr[ETH_ALEN]: station's MAC address + * @sta_id: index of station in uCode's station table + * @modify_mask: STA_MODIFY_*, selects which parameters to modify vs. leave + * alone. 1 - modify, 0 - don't change. + * @station_flags: look at %iwl_sta_flags + * @station_flags_msk: what of %station_flags have changed + * @add_immediate_ba_tid: tid for which to add block-ack support (Rx) + * Set %STA_MODIFY_ADD_BA_TID to use this field, and also set + * add_immediate_ba_ssn. + * @remove_immediate_ba_tid: tid for which to remove block-ack support (Rx) + * Set %STA_MODIFY_REMOVE_BA_TID to use this field + * @add_immediate_ba_ssn: ssn for the Rx block-ack session. Used together with + * add_immediate_ba_tid. + * @sleep_tx_count: number of packets to transmit to station even though it is + * asleep. Used to synchronise PS-poll and u-APSD responses while ucode + * keeps track of STA sleep state. + * @sleep_state_flags: Look at %iwl_sta_sleep_flag. + * @assoc_id: assoc_id to be sent in VHT PLCP (9-bit), for grp use 0, for AP + * mac-addr. + * @beamform_flags: beam forming controls + * @tfd_queue_msk: tfd queues used by this station + * + * The device contains an internal table of per-station information, with info + * on security keys, aggregation parameters, and Tx rates for initial Tx + * attempt and any retries (set by REPLY_TX_LINK_QUALITY_CMD). + * + * ADD_STA sets up the table entry for one station, either creating a new + * entry, or modifying a pre-existing one. 
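IWL_ADD_STA_STATUS_MASK and IWL_ADD_STA_BAID_MASK above encode a protocol change: the ADD_STA response byte is now the low byte of a wider status word, and v8 of the command only appends fields after the v7 layout, so one in-memory struct serves both firmware generations by sending fewer bytes to the older one. A hedged sketch of both conventions; the field layouts are abbreviated stand-ins, not the real command:

#include <stdio.h>

/* v8 appends rx_ba_window after the v7 fields, so a v8 struct truncated
 * to sizeof(v7) is a valid v7 command (layouts abbreviated) */
struct add_sta_v7 { unsigned int common[10]; };
struct add_sta_v8 { unsigned int common[10]; unsigned short rx_ba_window, reserved; };

#define ADD_STA_STATUS_MASK 0xFF	/* low byte: status; next byte: BAID */

static unsigned long add_sta_cmd_size(int has_new_rx_api)
{
	return has_new_rx_api ? sizeof(struct add_sta_v8)
			      : sizeof(struct add_sta_v7);
}

int main(void)
{
	unsigned int status = 0x0301;	/* illustrative: BAID 3, status 1 */

	printf("send %lu bytes, status 0x%x\n",
	       add_sta_cmd_size(0), status & ADD_STA_STATUS_MASK);
	return 0;
}

This is why every switch on the ADD_STA status in sta.c now masks with IWL_ADD_STA_STATUS_MASK first.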
+ */ +struct iwl_mvm_add_sta_cmd_v7 { + u8 add_modify; + u8 awake_acs; + __le16 tid_disable_tx; + __le32 mac_id_n_color; + u8 addr[ETH_ALEN]; /* _STA_ID_MODIFY_INFO_API_S_VER_1 */ + __le16 reserved2; + u8 sta_id; + u8 modify_mask; + __le16 reserved3; + __le32 station_flags; + __le32 station_flags_msk; + u8 add_immediate_ba_tid; + u8 remove_immediate_ba_tid; + __le16 add_immediate_ba_ssn; + __le16 sleep_tx_count; + __le16 sleep_state_flags; + __le16 assoc_id; + __le16 beamform_flags; + __le32 tfd_queue_msk; +} __packed; /* ADD_STA_CMD_API_S_VER_7 */ + /** * struct iwl_mvm_add_sta_cmd - Add/modify a station in the fw's sta table. * ( REPLY_ADD_STA = 0x18 ) @@ -282,6 +346,7 @@ struct iwl_mvm_keyinfo { * mac-addr. * @beamform_flags: beam forming controls * @tfd_queue_msk: tfd queues used by this station + * @rx_ba_window: aggregation window size * * The device contains an internal table of per-station information, with info * on security keys, aggregation parameters, and Tx rates for initial Tx @@ -310,7 +375,9 @@ struct iwl_mvm_add_sta_cmd { __le16 assoc_id; __le16 beamform_flags; __le32 tfd_queue_msk; -} __packed; /* ADD_STA_CMD_API_S_VER_7 */ + __le16 rx_ba_window; + __le16 reserved; +} __packed; /* ADD_STA_CMD_API_S_VER_8 */ /** * struct iwl_mvm_add_sta_key_cmd - add/modify sta key diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h index 0036d18334af..ba3f0bbddde8 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h @@ -510,6 +510,9 @@ struct iwl_mvm_tx_resp { * @scd_ssn: the index of the last contiguously sent packet * @txed: number of Txed frames in this batch * @txed_2_done: number of Acked frames in this batch + * @reduced_txp: power reduced according to TPC. This is the actual value and + * not a copy from the LQ command. Thus, if not the first rate was used + * for Tx-ing then this value will be set to 0 by FW. */ struct iwl_mvm_ba_notif { __le32 sta_addr_lo32; @@ -524,7 +527,8 @@ struct iwl_mvm_ba_notif { __le16 scd_ssn; u8 txed; u8 txed_2_done; - __le16 reserved1; + u8 reduced_txp; + u8 reserved1; } __packed; /* diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h index 82049bb139c2..f332497e29d1 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h @@ -213,6 +213,8 @@ enum { MFUART_LOAD_NOTIFICATION = 0xb1, + RSS_CONFIG_CMD = 0xb3, + REPLY_RX_PHY_CMD = 0xc0, REPLY_RX_MPDU_CMD = 0xc1, FRAME_RELEASE = 0xc3, @@ -280,11 +282,16 @@ enum iwl_phy_ops_subcmd_ids { DTS_MEASUREMENT_NOTIF_WIDE = 0xFF, }; +enum iwl_prot_offload_subcmd_ids { + STORED_BEACON_NTF = 0xFF, +}; + /* command groups */ enum { LEGACY_GROUP = 0x0, LONG_GROUP = 0x1, PHY_OPS_GROUP = 0x4, + PROT_OFFLOAD_GROUP = 0xb, }; /** @@ -1851,4 +1858,28 @@ struct iwl_shared_mem_cfg { __le32 page_buff_size; } __packed; /* SHARED_MEM_ALLOC_API_S_VER_1 */ +#define MAX_STORED_BEACON_SIZE 600 + +/** + * Stored beacon notification + * + * @system_time: system time on air rise + * @tsf: TSF on air rise + * @beacon_timestamp: beacon on air rise + * @phy_flags: general phy flags: band, modulation, etc. 
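The new reduced_txp byte in the BA notification is handed to the rate-scaling code through mac80211's status_driver_data pointer slots, cast through uintptr_t in both directions. A tiny standalone sketch of that round trip; the value is illustrative:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned char reduced_txp = 2;	/* TPC power-reduction step */
	void *slot;

	/* store: scalar -> pointer-sized integer -> void * slot */
	slot = (void *)(uintptr_t)reduced_txp;
	/* load: the consumer recovers the scalar the same way */
	printf("reduced txp %u\n", (unsigned)(uintptr_t)slot);
	return 0;
}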
+ * @channel: channel this beacon was received on + * @rates: rate in ucode internal format + * @byte_count: frame's byte count + */ +struct iwl_stored_beacon_notif { + __le32 system_time; + __le64 tsf; + __le32 beacon_timestamp; + __le16 phy_flags; + __le16 channel; + __le32 rates; + __le32 byte_count; + u8 data[MAX_STORED_BEACON_SIZE]; +} __packed; /* WOWLAN_STROED_BEACON_INFO_S_VER_1 */ + #endif /* __fw_api_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c index 0813f8184e10..4856eac120f6 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c @@ -435,6 +435,10 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) bool monitor_dump_only = false; int i; + if (!IWL_MVM_COLLECT_FW_ERR_DUMP && + !mvm->trans->dbg_dest_tlv) + return; + lockdep_assert_held(&mvm->mutex); /* there's no point in fw dump if the bus is dead */ @@ -640,8 +644,6 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) /* Dump fw's virtual image */ if (mvm->fw->img[mvm->cur_ucode].paging_mem_size) { - u32 i; - for (i = 1; i < mvm->num_of_paging_blk + 1; i++) { struct iwl_fw_error_dump_paging *paging; struct page *pages = diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index 4ed5180c547b..070e2af05ca2 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c @@ -7,6 +7,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -107,6 +108,24 @@ static int iwl_send_tx_ant_cfg(struct iwl_mvm *mvm, u8 valid_tx_ant) sizeof(tx_ant_cmd), &tx_ant_cmd); } +static int iwl_send_rss_cfg_cmd(struct iwl_mvm *mvm) +{ + int i; + struct iwl_rss_config_cmd cmd = { + .flags = cpu_to_le32(IWL_RSS_ENABLE), + .hash_mask = IWL_RSS_HASH_TYPE_IPV4_TCP | + IWL_RSS_HASH_TYPE_IPV4_PAYLOAD | + IWL_RSS_HASH_TYPE_IPV6_TCP | + IWL_RSS_HASH_TYPE_IPV6_PAYLOAD, + }; + + for (i = 0; i < ARRAY_SIZE(cmd.indirection_table); i++) + cmd.indirection_table[i] = i % mvm->trans->num_rx_queues; + memcpy(cmd.secret_key, mvm->secret_key, ARRAY_SIZE(cmd.secret_key)); + + return iwl_mvm_send_cmd_pdu(mvm, RSS_CONFIG_CMD, 0, sizeof(cmd), &cmd); +} + static void iwl_free_fw_paging(struct iwl_mvm *mvm) { int i; @@ -894,6 +913,16 @@ int iwl_mvm_up(struct iwl_mvm *mvm) if (ret) goto error; + /* Init RSS configuration */ + if (iwl_mvm_has_new_rx_api(mvm)) { + ret = iwl_send_rss_cfg_cmd(mvm); + if (ret) { + IWL_ERR(mvm, "Failed to configure RSS queues: %d\n", + ret); + goto error; + } + } + /* init the fw <-> mac80211 STA mapping */ for (i = 0; i < IWL_MVM_STATION_COUNT; i++) RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c index bf1e5eb5dbdb..535134d639e0 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c @@ -7,7 +7,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 
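iwl_send_rss_cfg_cmd() above seeds the default table by round-robining the 128 hash buckets over the available RX queues. One caveat worth flagging: the secret-key copies in this patch are sized with ARRAY_SIZE(), which counts u32 elements (10) rather than bytes (40), so only the first quarter of the key buffer is randomized and copied; sizeof() would cover the whole key. A standalone sketch of the round-robin spread, with the queue count illustrative:

#include <stdio.h>

#define TBL_SIZE 128	/* IWL_RSS_INDIRECTION_TABLE_SIZE */

int main(void)
{
	unsigned char tbl[TBL_SIZE];
	int i, num_rx_queues = 4;	/* illustrative */

	/* spread hash buckets evenly over the RX queues */
	for (i = 0; i < TBL_SIZE; i++)
		tbl[i] = i % num_rx_queues;

	printf("%d %d %d %d %d\n", tbl[0], tbl[1], tbl[3], tbl[4], tbl[127]);
	return 0;
}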
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * Copyright(c) 2015 Intel Deutschland GmbH + * Copyright(c) 2015 - 2016 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -34,7 +34,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * Copyright(c) 2015 Intel Deutschland GmbH + * Copyright(c) 2015 - 2016 Intel Deutschland GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -744,7 +744,7 @@ static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm, * wake-ups. */ cmd->filter_flags |= cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST); - if (mvmvif->ap_assoc_sta_count) { + if (mvmvif->ap_assoc_sta_count || !mvm->drop_bcn_ap_mode) { cmd->filter_flags |= cpu_to_le32(MAC_FILTER_IN_BEACON); IWL_DEBUG_HC(mvm, "Asking FW to pass beacons\n"); } else { @@ -1462,3 +1462,40 @@ void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm, iwl_mvm_beacon_loss_iterator, mb); } + +void iwl_mvm_rx_stored_beacon_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_stored_beacon_notif *sb = (void *)pkt->data; + struct ieee80211_rx_status rx_status; + struct sk_buff *skb; + u32 size = le32_to_cpu(sb->byte_count); + + if (size == 0) + return; + + skb = alloc_skb(size, GFP_ATOMIC); + if (!skb) { + IWL_ERR(mvm, "alloc_skb failed\n"); + return; + } + + /* update rx_status according to the notification's metadata */ + memset(&rx_status, 0, sizeof(rx_status)); + rx_status.mactime = le64_to_cpu(sb->tsf); + rx_status.device_timestamp = le32_to_cpu(sb->system_time); + rx_status.band = + (sb->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_BAND_24)) ? + IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ; + rx_status.freq = + ieee80211_channel_to_frequency(le16_to_cpu(sb->channel), + rx_status.band); + + /* copy the data */ + memcpy(skb_put(skb, size), sb->data, size); + memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status)); + + /* pass it as regular rx to mac80211 */ + ieee80211_rx_napi(mvm->hw, skb, NULL); +} diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index d70a1716f3e0..01476f545695 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -884,10 +884,10 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw, ret = -EINVAL; break; } - ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, *ssn, true); + ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, *ssn, true, buf_size); break; case IEEE80211_AMPDU_RX_STOP: - ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, 0, false); + ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, 0, false, buf_size); break; case IEEE80211_AMPDU_TX_START: if (!iwl_enable_tx_ampdu(mvm->cfg)) { diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index 5f3ac8cccf49..ebe37bb0ce4c 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -346,8 +346,9 @@ struct iwl_mvm_vif_bf_data { * @pm_enabled - Indicate if MAC power management is allowed * @monitor_active: indicates that monitor context is configured, and that the * interface should get quota etc. 
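The mac context change above turns beacon delivery in AP mode into a two-input decision: beacons reach the host while any client is associated, or unconditionally once the new drop_bcn_ap_mode knob is cleared through debugfs. Reduced to a predicate (a sketch, not the driver's code):

#include <stdio.h>

static int pass_beacons(int ap_assoc_sta_count, int drop_bcn_ap_mode)
{
	return ap_assoc_sta_count || !drop_bcn_ap_mode;
}

int main(void)
{
	/* no clients + dropping enabled: 0; clients: 1; knob off: 1 */
	printf("%d %d %d\n", pass_beacons(0, 1), pass_beacons(2, 1),
	       pass_beacons(0, 0));
	return 0;
}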
- * @low_latency: indicates that this interface is in low-latency mode - * (VMACLowLatencyMode) + * @low_latency_traffic: indicates low latency traffic was detected + * @low_latency_dbgfs: low latency mode set from debugfs + * @low_latency_vcmd: low latency mode set from vendor command * @ps_disabled: indicates that this interface requires PS to be disabled * @queue_params: QoS params for this MAC * @bcast_sta: station used for broadcast packets. Used by the following @@ -375,7 +376,7 @@ struct iwl_mvm_vif { bool ap_ibss_active; bool pm_enabled; bool monitor_active; - bool low_latency; + bool low_latency_traffic, low_latency_dbgfs, low_latency_vcmd; bool ps_disabled; struct iwl_mvm_vif_bf_data bf_data; @@ -432,6 +433,7 @@ struct iwl_mvm_vif { struct iwl_dbgfs_pm dbgfs_pm; struct iwl_dbgfs_bf dbgfs_bf; struct iwl_mac_power_cmd mac_pwr_cmd; + int dbgfs_quota_min; #endif enum ieee80211_smps_mode smps_requests[NUM_IWL_MVM_SMPS_REQ]; @@ -645,6 +647,7 @@ struct iwl_mvm { atomic_t pending_frames[IWL_MVM_STATION_COUNT]; u32 tfd_drained[IWL_MVM_STATION_COUNT]; u8 rx_ba_sessions; + u32 secret_key[IWL_RSS_HASH_KEY_CNT]; /* configured by mac80211 */ u32 rts_threshold; @@ -856,6 +859,12 @@ struct iwl_mvm { u32 ciphers[6]; struct iwl_mvm_tof_data tof_data; + + /* + * Drop beacons from other APs in AP mode when there are no connected + * clients. + */ + bool drop_bcn_ap_mode; }; /* Extract MVM priv from op_mode and _hw */ @@ -1005,10 +1014,18 @@ static inline bool iwl_mvm_is_mplut_supported(struct iwl_mvm *mvm) IWL_MVM_BT_COEX_MPLUT; } +static inline +bool iwl_mvm_is_p2p_standalone_uapsd_supported(struct iwl_mvm *mvm) +{ + return fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_P2P_STANDALONE_UAPSD) && + IWL_MVM_P2P_UAPSD_STANDALONE; +} + static inline bool iwl_mvm_has_new_rx_api(struct iwl_mvm *mvm) { - /* firmware flag isn't defined yet */ - return false; + return fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_MULTI_QUEUE_RX_SUPPORT); } extern const u8 iwl_mvm_ac_to_tx_fifo[]; @@ -1184,6 +1201,8 @@ void iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); +void iwl_mvm_rx_stored_beacon_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_mac_ctxt_recalc_tsf_id(struct iwl_mvm *mvm, struct ieee80211_vif *vif); unsigned long iwl_mvm_get_used_hw_queues(struct iwl_mvm *mvm, @@ -1417,8 +1436,9 @@ static inline bool iwl_mvm_vif_low_latency(struct iwl_mvm_vif *mvmvif) * binding, so this has no real impact. For now, just return * the current desired low-latency state. 
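Splitting low_latency into three tracked sources means one writer can no longer clobber another: traffic detection, debugfs, and the vendor command each own a flag, and the accessor in this hunk ORs them together. A minimal model of the new semantics; the names mirror the new fields:

#include <stdio.h>

struct ll_sources { int traffic, dbgfs, vcmd; };

/* the vif is in low-latency mode if any source requests it */
static int vif_low_latency(const struct ll_sources *s)
{
	return s->traffic || s->dbgfs || s->vcmd;
}

int main(void)
{
	struct ll_sources s = { .traffic = 0, .dbgfs = 1, .vcmd = 0 };

	printf("%d\n", vif_low_latency(&s));	/* 1: dbgfs alone suffices */
	return 0;
}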
*/ - - return mvmvif->low_latency; + return mvmvif->low_latency_dbgfs || + mvmvif->low_latency_traffic || + mvmvif->low_latency_vcmd; } /* hw scheduler queue config */ @@ -1481,7 +1501,7 @@ void iwl_mvm_tt_handler(struct iwl_mvm *mvm); void iwl_mvm_tt_initialize(struct iwl_mvm *mvm, u32 min_backoff); void iwl_mvm_tt_exit(struct iwl_mvm *mvm); void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state); -int iwl_mvm_get_temp(struct iwl_mvm *mvm); +int iwl_mvm_get_temp(struct iwl_mvm *mvm, s32 *temp); /* Location Aware Regulatory */ struct iwl_mcc_update_resp * diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c index 7a3da2da6fd0..c446e0da9789 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c @@ -300,7 +300,7 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm) struct iwl_nvm_section *sections = mvm->nvm_sections; const __le16 *hw, *sw, *calib, *regulatory, *mac_override, *phy_sku; bool lar_enabled; - u32 mac_addr0, mac_addr1; + __le32 mac_addr0, mac_addr1; /* Checking for required sections */ if (mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) { @@ -337,8 +337,10 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm) return NULL; /* read the mac address from WFMP registers */ - mac_addr0 = iwl_trans_read_prph(mvm->trans, WFMP_MAC_ADDR_0); - mac_addr1 = iwl_trans_read_prph(mvm->trans, WFMP_MAC_ADDR_1); + mac_addr0 = cpu_to_le32(iwl_trans_read_prph(mvm->trans, + WFMP_MAC_ADDR_0)); + mac_addr1 = cpu_to_le32(iwl_trans_read_prph(mvm->trans, + WFMP_MAC_ADDR_1)); hw = (const __le16 *)sections[mvm->cfg->nvm_hw_section_num].data; sw = (const __le16 *)sections[NVM_SECTION_TYPE_SW].data; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index 89ea70deeb84..09a94a5efb61 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c @@ -33,6 +33,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 Intel Deutschland GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -267,6 +268,8 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = { true), RX_HANDLER(MFUART_LOAD_NOTIFICATION, iwl_mvm_rx_mfuart_notif, false), RX_HANDLER(TOF_NOTIFICATION, iwl_mvm_tof_resp_handler, true), + RX_HANDLER_GRP(PROT_OFFLOAD_GROUP, STORED_BEACON_NTF, + iwl_mvm_rx_stored_beacon_notif, false), }; #undef RX_HANDLER @@ -344,6 +347,7 @@ static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = { HCMD_NAME(MAC_PM_POWER_TABLE), HCMD_NAME(TDLS_CHANNEL_SWITCH_NOTIFICATION), HCMD_NAME(MFUART_LOAD_NOTIFICATION), + HCMD_NAME(RSS_CONFIG_CMD), HCMD_NAME(SCAN_ITERATION_COMPLETE_UMAC), HCMD_NAME(REPLY_RX_PHY_CMD), HCMD_NAME(REPLY_RX_MPDU_CMD), @@ -386,13 +390,20 @@ static const struct iwl_hcmd_names iwl_mvm_phy_names[] = { HCMD_NAME(DTS_MEASUREMENT_NOTIF_WIDE), }; +/* Please keep this array *SORTED* by hex value. 
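The sorting rule stated above is the lookup contract for these name tables: resolution is a binary search keyed on the command id, so each per-group array must stay ordered by hex value or lookups silently fail. A standalone illustration with libc bsearch(), using three ids that appear in this patch:

#include <stdio.h>
#include <stdlib.h>

struct hcmd_name { unsigned char id; const char *name; };

/* must stay sorted by id, mirroring the driver's rule */
static const struct hcmd_name names[] = {
	{ 0x18, "ADD_STA" },
	{ 0xb3, "RSS_CONFIG_CMD" },
	{ 0xff, "STORED_BEACON_NTF" },
};

static int cmp(const void *key, const void *elt)
{
	return *(const unsigned char *)key -
	       ((const struct hcmd_name *)elt)->id;
}

int main(void)
{
	unsigned char key = 0xb3;
	struct hcmd_name *found = bsearch(&key, names,
					  sizeof(names) / sizeof(names[0]),
					  sizeof(names[0]), cmp);

	printf("%s\n", found ? found->name : "UNKNOWN");	/* RSS_CONFIG_CMD */
	return 0;
}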
+ * Access is done through binary search + */ +static const struct iwl_hcmd_names iwl_mvm_prot_offload_names[] = { + HCMD_NAME(STORED_BEACON_NTF), +}; + static const struct iwl_hcmd_arr iwl_mvm_groups[] = { [LEGACY_GROUP] = HCMD_ARR(iwl_mvm_legacy_names), [LONG_GROUP] = HCMD_ARR(iwl_mvm_legacy_names), [PHY_OPS_GROUP] = HCMD_ARR(iwl_mvm_phy_names), + [PROT_OFFLOAD_GROUP] = HCMD_ARR(iwl_mvm_prot_offload_names), }; - /* this forward declaration can avoid to export the function */ static void iwl_mvm_async_handlers_wk(struct work_struct *wk); static void iwl_mvm_d0i3_exit_work(struct work_struct *wk); @@ -481,6 +492,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, } mvm->sf_state = SF_UNINIT; mvm->cur_ucode = IWL_UCODE_INIT; + mvm->drop_bcn_ap_mode = true; mutex_init(&mvm->mutex); mutex_init(&mvm->d0i3_suspend_mutex); @@ -641,6 +653,9 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, iwl_mvm_tof_init(mvm); + /* init RSS hash key */ + get_random_bytes(mvm->secret_key, ARRAY_SIZE(mvm->secret_key)); + return op_mode; out_unregister: @@ -1196,7 +1211,7 @@ static void iwl_mvm_set_wowlan_data(struct iwl_mvm *mvm, cmd->is_11n_connection = ap_sta->ht_cap.ht_supported; cmd->offloading_tid = iter_data->offloading_tid; cmd->flags = ENABLE_L3_FILTERING | ENABLE_NBNS_FILTERING | - ENABLE_DHCP_FILTERING; + ENABLE_DHCP_FILTERING | ENABLE_STORE_BEACON; /* * The d0i3 uCode takes care of the nonqos counters, * so configure only the qos seq ones. @@ -1217,8 +1232,7 @@ int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode) struct iwl_wowlan_config_cmd wowlan_config_cmd = { .wakeup_filter = cpu_to_le32(IWL_WOWLAN_WAKEUP_RX_FRAME | IWL_WOWLAN_WAKEUP_BEACON_MISS | - IWL_WOWLAN_WAKEUP_LINK_CHANGE | - IWL_WOWLAN_WAKEUP_BCN_FILTERING), + IWL_WOWLAN_WAKEUP_LINK_CHANGE), }; struct iwl_d3_manager_config d3_cfg_cmd = { .min_sleep_time = cpu_to_le32(1000), @@ -1268,6 +1282,12 @@ int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode) /* configure wowlan configuration only if needed */ if (mvm->d0i3_ap_sta_id != IWL_MVM_STATION_COUNT) { + /* wake on beacons only if beacon storing isn't supported */ + if (!fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_BEACON_STORING)) + wowlan_config_cmd.wakeup_filter |= + cpu_to_le32(IWL_WOWLAN_WAKEUP_BCN_FILTERING); + iwl_mvm_wowlan_config_key_params(mvm, d0i3_iter_data.connected_vif, true, flags); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/power.c b/drivers/net/wireless/intel/iwlwifi/mvm/power.c index 9de159f1ef2d..f313910cd026 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/power.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/power.c @@ -7,7 +7,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * Copyright(c) 2015 Intel Deutschland GmbH + * Copyright(c) 2015 - 2016 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -34,7 +34,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * Copyright(c) 2015 Intel Deutschland GmbH + * Copyright(c) 2015 - 2016 Intel Deutschland GmbH * All rights reserved. 
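The d0i3 entry change above stops requesting beacon-filtering wakeups whenever the firmware advertises beacon storing, since a stored beacon can be replayed on exit instead of waking the host. The shape of that capability gate, with the bit values illustrative rather than the real wakeup flags:

#include <stdio.h>

#define WAKEUP_RX_FRAME		(1u << 0)	/* illustrative bits */
#define WAKEUP_BEACON_MISS	(1u << 1)
#define WAKEUP_LINK_CHANGE	(1u << 2)
#define WAKEUP_BCN_FILTERING	(1u << 3)

int main(void)
{
	int fw_has_beacon_storing = 1;
	unsigned int filter = WAKEUP_RX_FRAME | WAKEUP_BEACON_MISS |
			      WAKEUP_LINK_CHANGE;

	/* wake on beacon filtering only if the fw cannot store beacons */
	if (!fw_has_beacon_storing)
		filter |= WAKEUP_BCN_FILTERING;

	printf("filter 0x%x\n", filter);	/* 0x7 */
	return 0;
}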
* * Redistribution and use in source and binary forms, with or without @@ -259,6 +259,26 @@ static void iwl_mvm_power_configure_uapsd(struct iwl_mvm *mvm, IWL_MVM_PS_HEAVY_RX_THLD_PERCENT; } +static void iwl_mvm_p2p_standalone_iterator(void *_data, u8 *mac, + struct ieee80211_vif *vif) +{ + bool *is_p2p_standalone = _data; + + switch (ieee80211_vif_type_p2p(vif)) { + case NL80211_IFTYPE_P2P_GO: + case NL80211_IFTYPE_AP: + *is_p2p_standalone = false; + break; + case NL80211_IFTYPE_STATION: + if (vif->bss_conf.assoc) + *is_p2p_standalone = false; + break; + + default: + break; + } +} + static bool iwl_mvm_power_allow_uapsd(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { @@ -268,9 +288,6 @@ static bool iwl_mvm_power_allow_uapsd(struct iwl_mvm *mvm, ETH_ALEN)) return false; - if (vif->p2p && - !(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD)) - return false; /* * Avoid using uAPSD if P2P client is associated to GO that uses * opportunistic power save. This is due to current FW limitation. @@ -287,6 +304,22 @@ static bool iwl_mvm_power_allow_uapsd(struct iwl_mvm *mvm, if (iwl_mvm_phy_ctx_count(mvm) >= 2) return false; + if (vif->p2p) { + /* Allow U-APSD only if p2p is stand alone */ + bool is_p2p_standalone = true; + + if (!iwl_mvm_is_p2p_standalone_uapsd_supported(mvm)) + return false; + + ieee80211_iterate_active_interfaces_atomic(mvm->hw, + IEEE80211_IFACE_ITER_NORMAL, + iwl_mvm_p2p_standalone_iterator, + &is_p2p_standalone); + + if (!is_p2p_standalone) + return false; + } + return true; } @@ -544,7 +577,6 @@ void iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm *mvm, struct iwl_power_vifs { struct iwl_mvm *mvm; - struct ieee80211_vif *bf_vif; struct ieee80211_vif *bss_vif; struct ieee80211_vif *p2p_vif; struct ieee80211_vif *ap_vif; @@ -617,11 +649,6 @@ static void iwl_mvm_power_get_vifs_iterator(void *_data, u8 *mac, if (mvmvif->phy_ctxt) if (mvmvif->phy_ctxt->id < MAX_PHYS) power_iterator->bss_active = true; - - if (mvmvif->bf_data.bf_enabled && - !WARN_ON(power_iterator->bf_vif)) - power_iterator->bf_vif = vif; - break; default: @@ -850,29 +877,9 @@ int iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm, return _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd, flags, false); } -static int iwl_mvm_update_beacon_abort(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - bool enable) -{ - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct iwl_beacon_filter_cmd cmd = { - IWL_BF_CMD_CONFIG_DEFAULTS, - .bf_enable_beacon_filter = cpu_to_le32(1), - }; - - if (!mvmvif->bf_data.bf_enabled) - return 0; - - if (mvm->cur_ucode == IWL_UCODE_WOWLAN) - cmd.ba_escape_timer = cpu_to_le32(IWL_BA_ESCAPE_TIMER_D3); - - mvmvif->bf_data.ba_enabled = enable; - return _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd, 0, false); -} - -int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - u32 flags) +static int _iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + u32 flags, bool d0i3) { struct iwl_beacon_filter_cmd cmd = {}; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); @@ -883,12 +890,20 @@ int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm, ret = iwl_mvm_beacon_filter_send_cmd(mvm, &cmd, flags); - if (!ret) + /* don't change bf_enabled in case of temporary d0i3 configuration */ + if (!ret && !d0i3) mvmvif->bf_data.bf_enabled = false; return ret; } +int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + u32 flags) +{ + return _iwl_mvm_disable_beacon_filter(mvm, vif, flags, 
false); +} + static int iwl_mvm_power_set_ps(struct iwl_mvm *mvm) { bool disable_ps; @@ -918,21 +933,26 @@ static int iwl_mvm_power_set_ps(struct iwl_mvm *mvm) } static int iwl_mvm_power_set_ba(struct iwl_mvm *mvm, - struct iwl_power_vifs *vifs) + struct ieee80211_vif *vif) { - struct iwl_mvm_vif *mvmvif; - bool ba_enable; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_beacon_filter_cmd cmd = { + IWL_BF_CMD_CONFIG_DEFAULTS, + .bf_enable_beacon_filter = cpu_to_le32(1), + }; - if (!vifs->bf_vif) + if (!mvmvif->bf_data.bf_enabled) return 0; - mvmvif = iwl_mvm_vif_from_mac80211(vifs->bf_vif); + if (mvm->cur_ucode == IWL_UCODE_WOWLAN) + cmd.ba_escape_timer = cpu_to_le32(IWL_BA_ESCAPE_TIMER_D3); - ba_enable = !(!mvmvif->pm_enabled || mvm->ps_disabled || - !vifs->bf_vif->bss_conf.ps || - iwl_mvm_vif_low_latency(mvmvif)); + mvmvif->bf_data.ba_enabled = !(!mvmvif->pm_enabled || + mvm->ps_disabled || + !vif->bss_conf.ps || + iwl_mvm_vif_low_latency(mvmvif)); - return iwl_mvm_update_beacon_abort(mvm, vifs->bf_vif, ba_enable); + return _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd, 0, false); } int iwl_mvm_power_update_ps(struct iwl_mvm *mvm) @@ -953,7 +973,10 @@ int iwl_mvm_power_update_ps(struct iwl_mvm *mvm) if (ret) return ret; - return iwl_mvm_power_set_ba(mvm, &vifs); + if (vifs.bss_vif) + return iwl_mvm_power_set_ba(mvm, vifs.bss_vif); + + return 0; } int iwl_mvm_power_update_mac(struct iwl_mvm *mvm) @@ -988,7 +1011,10 @@ int iwl_mvm_power_update_mac(struct iwl_mvm *mvm) return ret; } - return iwl_mvm_power_set_ba(mvm, &vifs); + if (vifs.bss_vif) + return iwl_mvm_power_set_ba(mvm, vifs.bss_vif); + + return 0; } int iwl_mvm_update_d0i3_power_mode(struct iwl_mvm *mvm, @@ -1025,8 +1051,17 @@ int iwl_mvm_update_d0i3_power_mode(struct iwl_mvm *mvm, IWL_BF_CMD_CONFIG_D0I3, .bf_enable_beacon_filter = cpu_to_le32(1), }; - ret = _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd_bf, - flags, true); + /* + * When beacon storing is supported - disable beacon filtering + * altogether - the latest beacon will be sent when exiting d0i3 + */ + if (fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_BEACON_STORING)) + ret = _iwl_mvm_disable_beacon_filter(mvm, vif, flags, + true); + else + ret = _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd_bf, + flags, true); } else { if (mvmvif->bf_data.bf_enabled) ret = iwl_mvm_enable_beacon_filter(mvm, vif, flags); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/quota.c b/drivers/net/wireless/intel/iwlwifi/mvm/quota.c index 0b762b4f8fad..2141db5bff82 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/quota.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/quota.c @@ -7,6 +7,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2016 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -33,6 +34,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2016 Intel Deutschland GmbH * All rights reserved. 
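iwl_mvm_p2p_standalone_iterator() above implements the policy behind the new U-APSD check: the P2P client only keeps U-APSD if it is genuinely alone, so any AP or GO interface, or any associated station interface, vetoes it. The decision logic lifted into a standalone sketch; the vif model is simplified:

#include <stdio.h>

enum vif_type { VIF_STATION, VIF_AP, VIF_P2P_GO, VIF_OTHER };
struct vif { enum vif_type type; int assoc; };

static void standalone_iterator(const struct vif *vif, int *is_standalone)
{
	switch (vif->type) {
	case VIF_P2P_GO:
	case VIF_AP:
		*is_standalone = 0;
		break;
	case VIF_STATION:
		if (vif->assoc)	/* only an associated STA vetoes */
			*is_standalone = 0;
		break;
	default:
		break;
	}
}

int main(void)
{
	struct vif vifs[] = { { VIF_STATION, 0 }, { VIF_OTHER, 0 } };
	int i, standalone = 1;

	for (i = 0; i < 2; i++)
		standalone_iterator(&vifs[i], &standalone);
	printf("p2p standalone: %d\n", standalone);	/* 1 */
	return 0;
}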
* * Redistribution and use in source and binary forms, with or without @@ -74,6 +76,9 @@ struct iwl_mvm_quota_iterator_data { int n_interfaces[MAX_BINDINGS]; int colors[MAX_BINDINGS]; int low_latency[MAX_BINDINGS]; +#ifdef CONFIG_IWLWIFI_DEBUGFS + int dbgfs_min[MAX_BINDINGS]; +#endif int n_low_latency_bindings; struct ieee80211_vif *disabled_vif; }; @@ -129,6 +134,12 @@ static void iwl_mvm_quota_iterator(void *_data, u8 *mac, data->n_interfaces[id]++; +#ifdef CONFIG_IWLWIFI_DEBUGFS + if (mvmvif->dbgfs_quota_min) + data->dbgfs_min[id] = max(data->dbgfs_min[id], + mvmvif->dbgfs_quota_min); +#endif + if (iwl_mvm_vif_low_latency(mvmvif) && !data->low_latency[id]) { data->n_low_latency_bindings++; data->low_latency[id] = true; @@ -259,6 +270,11 @@ int iwl_mvm_update_quotas(struct iwl_mvm *mvm, if (data.n_interfaces[i] <= 0) cmd.quotas[idx].quota = cpu_to_le32(0); +#ifdef CONFIG_IWLWIFI_DEBUGFS + else if (data.dbgfs_min[i]) + cmd.quotas[idx].quota = + cpu_to_le32(data.dbgfs_min[i] * QUOTA_100 / 100); +#endif else if (data.n_low_latency_bindings == 1 && n_non_lowlat && data.low_latency[i]) /* diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c index 7bb6fd0e4391..6e7e78a37879 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c @@ -2,6 +2,7 @@ * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as @@ -724,14 +725,28 @@ static int _rs_collect_tx_data(struct iwl_mvm *mvm, return 0; } -static int rs_collect_tx_data(struct iwl_mvm *mvm, - struct iwl_lq_sta *lq_sta, - struct iwl_scale_tbl_info *tbl, - int scale_index, int attempts, int successes, - u8 reduced_txp) +static int rs_collect_tpc_data(struct iwl_mvm *mvm, + struct iwl_lq_sta *lq_sta, + struct iwl_scale_tbl_info *tbl, + int scale_index, int attempts, int successes, + u8 reduced_txp) +{ + struct iwl_rate_scale_data *window = NULL; + + if (WARN_ON_ONCE(reduced_txp > TPC_MAX_REDUCTION)) + return -EINVAL; + + window = &tbl->tpc_win[reduced_txp]; + return _rs_collect_tx_data(mvm, tbl, scale_index, attempts, successes, + window); +} + +static int rs_collect_tlc_data(struct iwl_mvm *mvm, + struct iwl_lq_sta *lq_sta, + struct iwl_scale_tbl_info *tbl, + int scale_index, int attempts, int successes) { struct iwl_rate_scale_data *window = NULL; - int ret; if (scale_index < 0 || scale_index >= IWL_RATE_COUNT) return -EINVAL; @@ -745,16 +760,6 @@ static int rs_collect_tx_data(struct iwl_mvm *mvm, /* Select window for current tx bit rate */ window = &(tbl->win[scale_index]); - - ret = _rs_collect_tx_data(mvm, tbl, scale_index, attempts, successes, - window); - if (ret) - return ret; - - if (WARN_ON_ONCE(reduced_txp > TPC_MAX_REDUCTION)) - return -EINVAL; - - window = &tbl->tpc_win[reduced_txp]; return _rs_collect_tx_data(mvm, tbl, scale_index, attempts, successes, window); } @@ -1301,17 +1306,30 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta, * first index into rate scale table. */ if (info->flags & IEEE80211_TX_STAT_AMPDU) { - /* ampdu_ack_len = 0 marks no BA was received. 
In this case - * treat it as a single frame loss as we don't want the success - * ratio to dip too quickly because a BA wasn't received + rs_collect_tpc_data(mvm, lq_sta, curr_tbl, lq_rate.index, + info->status.ampdu_len, + info->status.ampdu_ack_len, + reduced_txp); + + /* ampdu_ack_len = 0 marks no BA was received. For TLC, treat + * it as a single frame loss as we don't want the success ratio + * to dip too quickly because a BA wasn't received. + * For TPC, there's no need for this optimisation since we want + * to recover very quickly from a bad power reduction and, + * therefore we'd like the success ratio to get an immediate hit + * when failing to get a BA, so we'd switch back to a lower or + * zero power reduction. When FW transmits agg with a rate + * different from the initial rate, it will not use reduced txp + * and will send BA notification twice (one empty with reduced + * txp equal to the value from LQ and one with reduced txp 0). + * We need to update counters for each txp level accordingly. */ if (info->status.ampdu_ack_len == 0) info->status.ampdu_len = 1; - rs_collect_tx_data(mvm, lq_sta, curr_tbl, lq_rate.index, - info->status.ampdu_len, - info->status.ampdu_ack_len, - reduced_txp); + rs_collect_tlc_data(mvm, lq_sta, curr_tbl, lq_rate.index, + info->status.ampdu_len, + info->status.ampdu_ack_len); /* Update success/fail counts if not searching for new mode */ if (lq_sta->rs_state == RS_STATE_STAY_IN_COLUMN) { @@ -1344,9 +1362,13 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta, else continue; - rs_collect_tx_data(mvm, lq_sta, tmp_tbl, lq_rate.index, - 1, i < retries ? 0 : legacy_success, - reduced_txp); + rs_collect_tpc_data(mvm, lq_sta, tmp_tbl, + lq_rate.index, 1, + i < retries ? 0 : legacy_success, + reduced_txp); + rs_collect_tlc_data(mvm, lq_sta, tmp_tbl, + lq_rate.index, 1, + i < retries ? 0 : legacy_success); } /* Update success/fail counts if not searching for new mode */ @@ -2040,7 +2062,8 @@ static enum tpc_action rs_get_tpc_action(struct iwl_mvm *mvm, } /* try decreasing first if applicable */ - if (weak != TPC_INVALID) { + if (sr >= RS_PERCENT(IWL_MVM_RS_TPC_SR_NO_INCREASE) && + weak != TPC_INVALID) { if (weak_tpt == IWL_INVALID_VALUE && (strong_tpt == IWL_INVALID_VALUE || current_tpt >= strong_tpt)) { diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c index 0c073e02fd4c..615dea143d4e 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c @@ -201,25 +201,22 @@ static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm, struct iwl_rx_mpdu_desc *desc, struct ieee80211_rx_status *rx_status) { - int energy_a, energy_b, energy_c, max_energy; + int energy_a, energy_b, max_energy; energy_a = desc->energy_a; energy_a = energy_a ? -energy_a : S8_MIN; energy_b = desc->energy_b; energy_b = energy_b ? -energy_b : S8_MIN; - energy_c = desc->energy_c; - energy_c = energy_c ? 
-energy_c : S8_MIN; max_energy = max(energy_a, energy_b); - max_energy = max(max_energy, energy_c); - IWL_DEBUG_STATS(mvm, "energy In A %d B %d C %d , and max %d\n", - energy_a, energy_b, energy_c, max_energy); + IWL_DEBUG_STATS(mvm, "energy In A %d B %d, and max %d\n", + energy_a, energy_b, max_energy); rx_status->signal = max_energy; rx_status->chains = 0; /* TODO: phy info */ rx_status->chain_signal[0] = energy_a; rx_status->chain_signal[1] = energy_b; - rx_status->chain_signal[2] = energy_c; + rx_status->chain_signal[2] = S8_MIN; } static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr, @@ -294,7 +291,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, struct ieee80211_rx_status *rx_status; struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_rx_mpdu_desc *desc = (void *)pkt->data; - struct ieee80211_hdr *hdr = (void *)(desc + 1); + struct ieee80211_hdr *hdr = (void *)(pkt->data + sizeof(*desc)); u32 len = le16_to_cpu(desc->mpdu_len); u32 rate_n_flags = le32_to_cpu(desc->rate_n_flags); struct ieee80211_sta *sta = NULL; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c index 9a15642f80dd..1e1ab9daaec9 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c @@ -930,8 +930,11 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm) if (WARN_ON(num_channels > mvm->fw->ucode_capa.n_scan_channels)) return -ENOBUFS; - if (type == mvm->scan_type) + if (type == mvm->scan_type) { + IWL_DEBUG_SCAN(mvm, + "Ignoring UMAC scan config of the same type\n"); return 0; + } cmd_size = sizeof(*scan_config) + mvm->fw->ucode_capa.n_scan_channels; @@ -1109,7 +1112,7 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif, cmd->general_flags = cpu_to_le32(iwl_mvm_scan_umac_flags(mvm, params, vif)); - if (type == IWL_MVM_SCAN_SCHED) + if (type == IWL_MVM_SCAN_SCHED || type == IWL_MVM_SCAN_NETDETECT) cmd->flags = cpu_to_le32(IWL_UMAC_SCAN_FLAG_PREEMPTIVE); if (iwl_mvm_scan_use_ebs(mvm, vif)) @@ -1351,7 +1354,7 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm, if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) { hcmd.id = iwl_cmd_id(SCAN_REQ_UMAC, IWL_ALWAYS_LONG_GROUP, 0); - ret = iwl_mvm_scan_umac(mvm, vif, ¶ms, IWL_MVM_SCAN_SCHED); + ret = iwl_mvm_scan_umac(mvm, vif, ¶ms, type); } else { hcmd.id = SCAN_OFFLOAD_REQUEST_CMD; ret = iwl_mvm_scan_lmac(mvm, vif, ¶ms); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c index b556e33658d7..4854e79cbda8 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c @@ -7,6 +7,7 @@ * * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -33,6 +34,7 @@ * * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 Intel Deutschland GmbH * All rights reserved. 
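With the 9000-series RX descriptor no longer carrying a third chain, iwl_mvm_get_signal_strength() above reduces to two energy bytes; each is an attenuation value where zero means no measurement and maps to S8_MIN. A runnable sketch of the conversion, with the readings illustrative:

#include <stdio.h>

#define S8_MIN (-128)

/* zero means "no measurement"; otherwise negate to get dBm */
static int energy_to_dbm(unsigned char energy)
{
	return energy ? -(int)energy : S8_MIN;
}

int main(void)
{
	int a = energy_to_dbm(43), b = energy_to_dbm(0);

	/* prints: A -43 B -128 max -43 */
	printf("A %d B %d max %d\n", a, b, a > b ? a : b);
	return 0;
}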
* * Redistribution and use in source and binary forms, with or without @@ -68,6 +70,18 @@ #include "sta.h" #include "rs.h" +/* + * New version of ADD_STA_sta command added new fields at the end of the + * structure, so sending the size of the relevant API's structure is enough to + * support both API versions. + */ +static inline int iwl_mvm_add_sta_cmd_size(struct iwl_mvm *mvm) +{ + return iwl_mvm_has_new_rx_api(mvm) ? + sizeof(struct iwl_mvm_add_sta_cmd) : + sizeof(struct iwl_mvm_add_sta_cmd_v7); +} + static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm, enum nl80211_iftype iftype) { @@ -187,12 +201,13 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta, cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT); status = ADD_STA_SUCCESS; - ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(add_sta_cmd), + ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, + iwl_mvm_add_sta_cmd_size(mvm), &add_sta_cmd, &status); if (ret) return ret; - switch (status) { + switch (status & IWL_ADD_STA_STATUS_MASK) { case ADD_STA_SUCCESS: IWL_DEBUG_ASSOC(mvm, "ADD_STA PASSED\n"); break; @@ -357,12 +372,13 @@ int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta, cmd.station_flags_msk = cpu_to_le32(STA_FLG_DRAIN_FLOW); status = ADD_STA_SUCCESS; - ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd), + ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, + iwl_mvm_add_sta_cmd_size(mvm), &cmd, &status); if (ret) return ret; - switch (status) { + switch (status & IWL_ADD_STA_STATUS_MASK) { case ADD_STA_SUCCESS: IWL_DEBUG_INFO(mvm, "Frames for staid %d will drained in fw\n", mvmsta->sta_id); @@ -623,12 +639,13 @@ static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm, if (addr) memcpy(cmd.addr, addr, ETH_ALEN); - ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd), + ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, + iwl_mvm_add_sta_cmd_size(mvm), &cmd, &status); if (ret) return ret; - switch (status) { + switch (status & IWL_ADD_STA_STATUS_MASK) { case ADD_STA_SUCCESS: IWL_DEBUG_INFO(mvm, "Internal station added.\n"); return 0; @@ -819,7 +836,7 @@ int iwl_mvm_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) #define IWL_MAX_RX_BA_SESSIONS 16 int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, - int tid, u16 ssn, bool start) + int tid, u16 ssn, bool start, u8 buf_size) { struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); struct iwl_mvm_add_sta_cmd cmd = {}; @@ -839,6 +856,7 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, if (start) { cmd.add_immediate_ba_tid = (u8) tid; cmd.add_immediate_ba_ssn = cpu_to_le16(ssn); + cmd.rx_ba_window = cpu_to_le16((u16)buf_size); } else { cmd.remove_immediate_ba_tid = (u8) tid; } @@ -846,12 +864,13 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, STA_MODIFY_REMOVE_BA_TID; status = ADD_STA_SUCCESS; - ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd), + ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, + iwl_mvm_add_sta_cmd_size(mvm), &cmd, &status); if (ret) return ret; - switch (status) { + switch (status & IWL_ADD_STA_STATUS_MASK) { case ADD_STA_SUCCESS: IWL_DEBUG_INFO(mvm, "RX BA Session %sed in fw\n", start ? 
"start" : "stopp"); @@ -904,12 +923,13 @@ static int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg); status = ADD_STA_SUCCESS; - ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd), + ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, + iwl_mvm_add_sta_cmd_size(mvm), &cmd, &status); if (ret) return ret; - switch (status) { + switch (status & IWL_ADD_STA_STATUS_MASK) { case ADD_STA_SUCCESS: break; default: @@ -1640,7 +1660,8 @@ void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm, }; int ret; - ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, sizeof(cmd), &cmd); + ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, + iwl_mvm_add_sta_cmd_size(mvm), &cmd); if (ret) IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret); } @@ -1731,7 +1752,7 @@ void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm, ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC | CMD_WANT_ASYNC_CALLBACK, - sizeof(cmd), &cmd); + iwl_mvm_add_sta_cmd_size(mvm), &cmd); if (ret) IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret); } @@ -1766,7 +1787,8 @@ void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm, }; int ret; - ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, sizeof(cmd), &cmd); + ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, + iwl_mvm_add_sta_cmd_size(mvm), &cmd); if (ret) IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret); } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h index 39fdf5224e81..e3b9446ee995 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h @@ -401,7 +401,7 @@ void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm, /* AMPDU */ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, - int tid, u16 ssn, bool start); + int tid, u16 ssn, bool start, u8 buf_size); int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta, u16 tid, u16 *ssn); int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c index fb76004eede4..758d05a8c6aa 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c @@ -194,12 +194,12 @@ static int iwl_mvm_get_temp_cmd(struct iwl_mvm *mvm) return iwl_mvm_send_cmd_pdu(mvm, cmdid, 0, sizeof(extcmd), &extcmd); } -int iwl_mvm_get_temp(struct iwl_mvm *mvm) +int iwl_mvm_get_temp(struct iwl_mvm *mvm, s32 *temp) { struct iwl_notification_wait wait_temp_notif; static u16 temp_notif[] = { WIDE_ID(PHY_OPS_GROUP, DTS_MEASUREMENT_NOTIF_WIDE) }; - int ret, temp; + int ret; if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_WIDE_CMD_HDR)) temp_notif[0] = DTS_MEASUREMENT_NOTIFICATION; @@ -208,7 +208,7 @@ int iwl_mvm_get_temp(struct iwl_mvm *mvm) iwl_init_notification_wait(&mvm->notif_wait, &wait_temp_notif, temp_notif, ARRAY_SIZE(temp_notif), - iwl_mvm_temp_notif_wait, &temp); + iwl_mvm_temp_notif_wait, temp); ret = iwl_mvm_get_temp_cmd(mvm); if (ret) { @@ -219,12 +219,10 @@ int iwl_mvm_get_temp(struct iwl_mvm *mvm) ret = iwl_wait_notification(&mvm->notif_wait, &wait_temp_notif, IWL_MVM_TEMP_NOTIF_WAIT_TIMEOUT); - if (ret) { + if (ret) IWL_ERR(mvm, "Getting the temperature timed out\n"); - return ret; - } - return temp; + return ret; } static void check_exit_ctkill(struct work_struct *work) @@ -233,6 +231,7 @@ static void check_exit_ctkill(struct 
work_struct *work) struct iwl_mvm *mvm; u32 duration; s32 temp; + int ret; tt = container_of(work, struct iwl_mvm_tt_mgmt, ct_kill_exit.work); mvm = container_of(tt, struct iwl_mvm, thermal_throttle); @@ -250,13 +249,13 @@ static void check_exit_ctkill(struct work_struct *work) goto reschedule; } - temp = iwl_mvm_get_temp(mvm); + ret = iwl_mvm_get_temp(mvm, &temp); iwl_mvm_unref(mvm, IWL_MVM_REF_CHECK_CTKILL); __iwl_mvm_mac_stop(mvm); - if (temp < 0) + if (ret) goto reschedule; IWL_DEBUG_TEMP(mvm, "NIC temperature: %d\n", temp); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index 8bf48a7d0f4e..8a1638ec7e28 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c @@ -736,6 +736,37 @@ static void iwl_mvm_hwrate_to_tx_status(u32 rate_n_flags, iwl_mvm_hwrate_to_tx_rate(rate_n_flags, info->band, r); } +static void iwl_mvm_tx_status_check_trigger(struct iwl_mvm *mvm, + u32 status) +{ + struct iwl_fw_dbg_trigger_tlv *trig; + struct iwl_fw_dbg_trigger_tx_status *status_trig; + int i; + + if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TX_STATUS)) + return; + + trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TX_STATUS); + status_trig = (void *)trig->data; + + if (!iwl_fw_dbg_trigger_check_stop(mvm, NULL, trig)) + return; + + for (i = 0; i < ARRAY_SIZE(status_trig->statuses); i++) { + /* don't collect on status 0 */ + if (!status_trig->statuses[i].status) + break; + + if (status_trig->statuses[i].status != (status & TX_STATUS_MSK)) + continue; + + iwl_mvm_fw_dbg_collect_trig(mvm, trig, + "Tx status %d was received", + status & TX_STATUS_MSK); + break; + } +} + static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt) { @@ -784,6 +815,8 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, break; } + iwl_mvm_tx_status_check_trigger(mvm, status); + info->status.rates[0].count = tx_resp->failure_frame + 1; iwl_mvm_hwrate_to_tx_status(le32_to_cpu(tx_resp->initial_rate), info); @@ -1029,7 +1062,6 @@ static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); mvmsta->tid_data[tid].rate_n_flags = le32_to_cpu(tx_resp->initial_rate); - mvmsta->tid_data[tid].reduced_tpc = tx_resp->reduced_tpc; mvmsta->tid_data[tid].tx_time = le16_to_cpu(tx_resp->wireless_media_time); } @@ -1060,7 +1092,7 @@ static void iwl_mvm_tx_info_from_ba_notif(struct ieee80211_tx_info *info, /* TODO: not accounted if the whole A-MPDU failed */ info->status.tx_time = tid_data->tx_time; info->status.status_driver_data[0] = - (void *)(uintptr_t)tid_data->reduced_tpc; + (void *)(uintptr_t)ba_notif->reduced_txp; info->status.status_driver_data[1] = (void *)(uintptr_t)tid_data->rate_n_flags; } @@ -1133,6 +1165,8 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) scd_flow, ba_resp_scd_ssn, ba_notif->txed, ba_notif->txed_2_done); + IWL_DEBUG_TX_REPLY(mvm, "reduced txp from ba notif %d\n", + ba_notif->reduced_txp); tid_data->next_reclaimed = ba_resp_scd_ssn; iwl_mvm_check_ratid_empty(mvm, sta, tid); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c index 3a989f5c20db..59453c176580 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c @@ -937,18 +937,16 @@ bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm) } int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - bool 
value) + bool prev) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); int res; lockdep_assert_held(&mvm->mutex); - if (mvmvif->low_latency == value) + if (iwl_mvm_vif_low_latency(mvmvif) == prev) return 0; - mvmvif->low_latency = value; - res = iwl_mvm_update_quotas(mvm, false, NULL); if (res) return res; diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index 6261a68cae90..753ec6785912 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c @@ -7,6 +7,7 @@ * * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -66,6 +67,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> +#include <linux/pm_runtime.h> #include <linux/pci.h> #include <linux/pci-aspm.h> #include <linux/acpi.h> @@ -378,7 +380,10 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x3165, 0x8110, iwl3165_2ac_cfg)}, /* 3168 Series */ + {IWL_PCI_DEVICE(0x24FB, 0x2010, iwl3168_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FB, 0x2110, iwl3168_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24FB, 0x2050, iwl3168_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24FB, 0x2150, iwl3168_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FB, 0x0000, iwl3168_2ac_cfg)}, /* 7265 Series */ @@ -475,6 +480,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x24F3, 0x0000, iwl8265_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x0010, iwl8265_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x8010, iwl8265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24FD, 0x0810, iwl8265_2ac_cfg)}, /* 9000 Series */ {IWL_PCI_DEVICE(0x9DF0, 0x2A10, iwl5165_2ac_cfg)}, @@ -623,6 +629,15 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (ret) goto out_free_drv; + /* if RTPM is in use, enable it in our device */ + if (iwl_trans->runtime_pm_mode != IWL_PLAT_PM_MODE_DISABLED) { + pm_runtime_set_active(&pdev->dev); + pm_runtime_set_autosuspend_delay(&pdev->dev, + iwlwifi_mod_params.d0i3_entry_delay); + pm_runtime_use_autosuspend(&pdev->dev); + pm_runtime_allow(&pdev->dev); + } + return 0; out_free_drv: @@ -689,15 +704,132 @@ static int iwl_pci_resume(struct device *device) return 0; } -static SIMPLE_DEV_PM_OPS(iwl_dev_pm_ops, iwl_pci_suspend, iwl_pci_resume); +int iwl_pci_fw_enter_d0i3(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + int ret; + + if (test_bit(STATUS_FW_ERROR, &trans->status)) + return 0; + + set_bit(STATUS_TRANS_GOING_IDLE, &trans->status); + + /* config the fw */ + ret = iwl_op_mode_enter_d0i3(trans->op_mode); + if (ret == 1) { + IWL_DEBUG_RPM(trans, "aborting d0i3 entrance\n"); + clear_bit(STATUS_TRANS_GOING_IDLE, &trans->status); + return -EBUSY; + } + if (ret) + goto err; + + ret = wait_event_timeout(trans_pcie->d0i3_waitq, + test_bit(STATUS_TRANS_IDLE, &trans->status), + msecs_to_jiffies(IWL_TRANS_IDLE_TIMEOUT)); + if (!ret) { + IWL_ERR(trans, "Timeout entering D0i3\n"); + ret = -ETIMEDOUT; + goto err; + } + + clear_bit(STATUS_TRANS_GOING_IDLE, &trans->status); + + return 0; +err: + clear_bit(STATUS_TRANS_GOING_IDLE, &trans->status); + iwl_trans_fw_error(trans); + return ret; +} + +int iwl_pci_fw_exit_d0i3(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + int 
ret; + + /* sometimes a D0i3 entry is not followed through */ + if (!test_bit(STATUS_TRANS_IDLE, &trans->status)) + return 0; + + /* config the fw */ + ret = iwl_op_mode_exit_d0i3(trans->op_mode); + if (ret) + goto err; + + /* we clear STATUS_TRANS_IDLE only when D0I3_END command is completed */ + + ret = wait_event_timeout(trans_pcie->d0i3_waitq, + !test_bit(STATUS_TRANS_IDLE, &trans->status), + msecs_to_jiffies(IWL_TRANS_IDLE_TIMEOUT)); + if (!ret) { + IWL_ERR(trans, "Timeout exiting D0i3\n"); + ret = -ETIMEDOUT; + goto err; + } + + return 0; +err: + clear_bit(STATUS_TRANS_IDLE, &trans->status); + iwl_trans_fw_error(trans); + return ret; +} + +#ifdef CONFIG_IWLWIFI_PCIE_RTPM +static int iwl_pci_runtime_suspend(struct device *device) +{ + struct pci_dev *pdev = to_pci_dev(device); + struct iwl_trans *trans = pci_get_drvdata(pdev); + int ret; + + IWL_DEBUG_RPM(trans, "entering runtime suspend\n"); + + if (test_bit(STATUS_DEVICE_ENABLED, &trans->status)) { + ret = iwl_pci_fw_enter_d0i3(trans); + if (ret < 0) + return ret; + } + + trans->system_pm_mode = IWL_PLAT_PM_MODE_D0I3; + + iwl_trans_d3_suspend(trans, false, false); + + return 0; +} + +static int iwl_pci_runtime_resume(struct device *device) +{ + struct pci_dev *pdev = to_pci_dev(device); + struct iwl_trans *trans = pci_get_drvdata(pdev); + enum iwl_d3_status d3_status; + + IWL_DEBUG_RPM(trans, "exiting runtime suspend (resume)\n"); + + iwl_trans_d3_resume(trans, &d3_status, false, false); + + if (test_bit(STATUS_DEVICE_ENABLED, &trans->status)) + return iwl_pci_fw_exit_d0i3(trans); + + return 0; +} +#endif /* CONFIG_IWLWIFI_PCIE_RTPM */ + +static const struct dev_pm_ops iwl_dev_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(iwl_pci_suspend, + iwl_pci_resume) +#ifdef CONFIG_IWLWIFI_PCIE_RTPM + SET_RUNTIME_PM_OPS(iwl_pci_runtime_suspend, + iwl_pci_runtime_resume, + NULL) +#endif /* CONFIG_IWLWIFI_PCIE_RTPM */ +}; #define IWL_PM_OPS (&iwl_dev_pm_ops) -#else +#else /* CONFIG_PM_SLEEP */ #define IWL_PM_OPS NULL -#endif +#endif /* CONFIG_PM_SLEEP */ static struct pci_driver iwl_pci_driver = { .name = DRV_NAME, diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h index cc3888e2700d..2f959162d045 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h +++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h @@ -2,6 +2,7 @@ * * Copyright(c) 2003 - 2015 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 Intel Deutschland GmbH * * Portions of this file are derived from the ipw3945 project, as well * as portions of the ieee80211 subsystem header files. @@ -56,17 +57,23 @@ #define RX_NUM_QUEUES 1 #define RX_POST_REQ_ALLOC 2 #define RX_CLAIM_REQ_ALLOC 8 -#define RX_POOL_SIZE ((RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC) * RX_NUM_QUEUES) -#define RX_LOW_WATERMARK 8 +#define RX_PENDING_WATERMARK 16 struct iwl_host_cmd; /*This file includes the declaration that are internal to the * trans_pcie layer */ +/** + * struct iwl_rx_mem_buffer + * @page_dma: bus address of rxb page + * @page: driver's pointer to the rxb page + * @vid: index of this rxb in the global table + */ struct iwl_rx_mem_buffer { dma_addr_t page_dma; struct page *page; + u16 vid; struct list_head list; }; @@ -90,8 +97,12 @@ struct isr_statistics { /** * struct iwl_rxq - Rx queue - * @bd: driver's pointer to buffer of receive buffer descriptors (rbd) + * @id: queue index + * @bd: driver's pointer to buffer of receive buffer descriptors (rbd). 
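The reworked iwl_rxq below gains per-queue read/write indices and a runtime queue_size, and the free-space computation in rx.c relies on the classic power-of-two ring trick: keep one slot empty so that read == write can only mean an empty ring. A standalone sketch of that arithmetic, with the ring size illustrative:

#include <stdio.h>

/* free-slot count for a power-of-two ring; the "- 1" keeps one slot
 * empty so read == write unambiguously means "empty" */
static unsigned int ring_space(unsigned int read, unsigned int write,
			       unsigned int size)
{
	return (read - write - 1) & (size - 1);
}

int main(void)
{
	/* a 256-entry ring: equal indices leave 255 usable slots */
	printf("%u %u\n", ring_space(10, 10, 256), ring_space(12, 10, 256));
	return 0;
}

The unsigned subtraction stays well defined even after the read index wraps past the write index, which is why the masking form is used instead of a modulo.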
+ * Address size is 32 bit in pre-9000 devices and 64 bit in 9000 devices. * @bd_dma: bus address of buffer of receive buffer descriptors (rbd) + * @ubd: driver's pointer to buffer of used receive buffer descriptors (rbd) + * @ubd_dma: physical address of buffer of used receive buffer descriptors (rbd) * @read: Shared index to newest available Rx buffer * @write: Shared index to oldest written Rx packet * @free_count: Number of pre-allocated buffers in rx_free @@ -103,32 +114,34 @@ struct isr_statistics { * @rb_stts: driver's pointer to receive buffer status * @rb_stts_dma: bus address of receive buffer status * @lock: - * @pool: initial pool of iwl_rx_mem_buffer for the queue - * @queue: actual rx queue + * @queue: actual rx queue. Not used for multi-rx queue. * * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers */ struct iwl_rxq { - __le32 *bd; + int id; + void *bd; dma_addr_t bd_dma; + __le32 *used_bd; + dma_addr_t used_bd_dma; u32 read; u32 write; u32 free_count; u32 used_count; u32 write_actual; + u32 queue_size; struct list_head rx_free; struct list_head rx_used; bool need_update; struct iwl_rb_status *rb_stts; dma_addr_t rb_stts_dma; spinlock_t lock; - struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE]; + struct napi_struct napi; struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE]; }; /** * struct iwl_rb_allocator - Rx allocator - * @pool: initial pool of allocator * @req_pending: number of requests the allcator had not processed yet * @req_ready: number of requests honored and ready for claiming * @rbd_allocated: RBDs with pages allocated and ready to be handled to @@ -140,7 +153,6 @@ struct iwl_rxq { * @rx_alloc: work struct for background calls */ struct iwl_rb_allocator { - struct iwl_rx_mem_buffer pool[RX_POOL_SIZE]; atomic_t req_pending; atomic_t req_ready; struct list_head rbd_allocated; @@ -280,6 +292,7 @@ struct iwl_txq { bool ampdu; bool block; unsigned long wd_timeout; + struct sk_buff_head overflow_q; }; static inline dma_addr_t @@ -297,6 +310,8 @@ struct iwl_tso_hdr_page { /** * struct iwl_trans_pcie - PCIe transport specific data * @rxq: all the RX queue data + * @rx_pool: initial pool of iwl_rx_mem_buffer for all the queues + * @global_table: table mapping received VID from hw to rxb * @rba: allocator for RX replenishing * @drv - pointer to iwl_drv * @trans: pointer to the generic transport area @@ -323,13 +338,14 @@ struct iwl_tso_hdr_page { * @fw_mon_size: size of the buffer for the firmware monitor */ struct iwl_trans_pcie { - struct iwl_rxq rxq; + struct iwl_rxq *rxq; + struct iwl_rx_mem_buffer rx_pool[MQ_RX_POOL_SIZE]; + struct iwl_rx_mem_buffer *global_table[MQ_RX_TABLE_SIZE]; struct iwl_rb_allocator rba; struct iwl_trans *trans; struct iwl_drv *drv; struct net_device napi_dev; - struct napi_struct napi; struct __percpu iwl_tso_hdr_page *tso_hdr_page; @@ -359,6 +375,7 @@ struct iwl_trans_pcie { bool ucode_write_complete; wait_queue_head_t ucode_write_waitq; wait_queue_head_t wait_command_queue; + wait_queue_head_t d0i3_waitq; u8 cmd_queue; u8 cmd_fifo; @@ -579,4 +596,7 @@ static inline int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans) } #endif +int iwl_pci_fw_exit_d0i3(struct iwl_trans *trans); +int iwl_pci_fw_enter_d0i3(struct iwl_trans *trans); + #endif /* __iwl_trans_int_pcie_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c index ccafbd8cf4b3..51314e56209d 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c @@ -2,6 +2,7 @@ * 
* Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 Intel Deutschland GmbH * * Portions of this file are derived from the ipw3945 project, as well * as portions of the ieee80211 subsystem header files. @@ -140,8 +141,8 @@ */ static int iwl_rxq_space(const struct iwl_rxq *rxq) { - /* Make sure RX_QUEUE_SIZE is a power of 2 */ - BUILD_BUG_ON(RX_QUEUE_SIZE & (RX_QUEUE_SIZE - 1)); + /* Make sure rx queue size is a power of 2 */ + WARN_ON(rxq->queue_size & (rxq->queue_size - 1)); /* * There can be up to (RX_QUEUE_SIZE - 1) free slots, to avoid ambiguity @@ -149,7 +150,7 @@ static int iwl_rxq_space(const struct iwl_rxq *rxq) * The following is equivalent to modulo by RX_QUEUE_SIZE and is well * defined for negative dividends. */ - return (rxq->read - rxq->write - 1) & (RX_QUEUE_SIZE - 1); + return (rxq->read - rxq->write - 1) & (rxq->queue_size - 1); } /* @@ -160,6 +161,12 @@ static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr) return cpu_to_le32((u32)(dma_addr >> 8)); } +static void iwl_pcie_write_prph_64(struct iwl_trans *trans, u64 ofs, u64 val) +{ + iwl_write_prph(trans, ofs, val & 0xffffffff); + iwl_write_prph(trans, ofs + 4, val >> 32); +} + /* * iwl_pcie_rx_stop - stops the Rx DMA */ @@ -173,10 +180,9 @@ int iwl_pcie_rx_stop(struct iwl_trans *trans) /* * iwl_pcie_rxq_inc_wr_ptr - Update the write pointer for the RX queue */ -static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans) +static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans, + struct iwl_rxq *rxq) { - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_rxq *rxq = &trans_pcie->rxq; u32 reg; lockdep_assert_held(&rxq->lock); @@ -201,24 +207,73 @@ static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans) } rxq->write_actual = round_down(rxq->write, 8); - iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual); + if (trans->cfg->mq_rx_supported) + iwl_write_prph(trans, RFH_Q_FRBDCB_WIDX(rxq->id), + rxq->write_actual); + else + iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual); } static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_rxq *rxq = &trans_pcie->rxq; + int i; - spin_lock(&rxq->lock); + for (i = 0; i < trans->num_rx_queues; i++) { + struct iwl_rxq *rxq = &trans_pcie->rxq[i]; + + if (!rxq->need_update) + continue; + spin_lock(&rxq->lock); + iwl_pcie_rxq_inc_wr_ptr(trans, rxq); + rxq->need_update = false; + spin_unlock(&rxq->lock); + } +} + +static void iwl_pcie_rxq_mq_restock(struct iwl_trans *trans, + struct iwl_rxq *rxq) +{ + struct iwl_rx_mem_buffer *rxb; + + /* + * If the device isn't enabled - no need to try to add buffers... + * This can happen when we stop the device and still have an interrupt + * pending. We stop the APM before we sync the interrupts because we + * have to (see comment there). On the other hand, since the APM is + * stopped, we cannot access the HW (in particular not prph). + * So don't try to restock if the APM has been already stopped. 
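For reference, the free-slot computation in iwl_rxq_space() above only works because the queue size is a power of two: the AND with (queue_size - 1) is the modulo, and unsigned wraparound keeps the subtraction well defined even after the indices wrap. A standalone sketch of the same arithmetic, with hypothetical names that are not part of the patch:

    #include <assert.h>
    #include <stdint.h>

    /* One slot is always kept empty, so read == write unambiguously
     * means "ring empty" rather than "ring full". */
    static uint32_t ring_free_slots(uint32_t read, uint32_t write, uint32_t size)
    {
    	assert((size & (size - 1)) == 0);	/* size must be a power of 2 */
    	return (read - write - 1) & (size - 1);
    }

    int main(void)
    {
    	assert(ring_free_slots(0, 0, 512) == 511);	/* empty ring */
    	assert(ring_free_slots(5, 4, 512) == 0);	/* full ring */
    	assert(ring_free_slots(3, 510, 512) == 4);	/* write has wrapped */
    	return 0;
    }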
+ */ + if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status)) + return; - if (!rxq->need_update) - goto exit_unlock; + spin_lock(&rxq->lock); + while (rxq->free_count) { + __le64 *bd = (__le64 *)rxq->bd; - iwl_pcie_rxq_inc_wr_ptr(trans); - rxq->need_update = false; + /* Get next free Rx buffer, remove from free list */ + rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer, + list); + list_del(&rxb->list); - exit_unlock: + /* first 12 bits are expected to be empty */ + WARN_ON(rxb->page_dma & DMA_BIT_MASK(12)); + /* Point to Rx buffer via next RBD in circular buffer */ + bd[rxq->write] = cpu_to_le64(rxb->page_dma | rxb->vid); + rxq->write = (rxq->write + 1) & MQ_RX_TABLE_MASK; + rxq->free_count--; + } spin_unlock(&rxq->lock); + + /* + * If we've added more space for the firmware to place data, tell it. + * Increment device's write pointer in multiples of 8. + */ + if (rxq->write_actual != (rxq->write & ~0x7)) { + spin_lock(&rxq->lock); + iwl_pcie_rxq_inc_wr_ptr(trans, rxq); + spin_unlock(&rxq->lock); + } } /* @@ -232,10 +287,8 @@ static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans) * also updates the memory address in the firmware to reference the new * target buffer. */ -static void iwl_pcie_rxq_restock(struct iwl_trans *trans) +static void iwl_pcie_rxq_restock(struct iwl_trans *trans, struct iwl_rxq *rxq) { - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_rxq *rxq = &trans_pcie->rxq; struct iwl_rx_mem_buffer *rxb; /* @@ -251,6 +304,7 @@ spin_lock(&rxq->lock); while ((iwl_rxq_space(rxq) > 0) && (rxq->free_count)) { + __le32 *bd = (__le32 *)rxq->bd; /* The overwritten rxb must be a used one */ rxb = rxq->queue[rxq->write]; BUG_ON(rxb && rxb->page); /* Get next free Rx buffer, remove from free list */ rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer, list); list_del(&rxb->list); /* Point to Rx buffer via next RBD in circular buffer */ - rxq->bd[rxq->write] = iwl_pcie_dma_addr2rbd_ptr(rxb->page_dma); + bd[rxq->write] = iwl_pcie_dma_addr2rbd_ptr(rxb->page_dma); rxq->queue[rxq->write] = rxb; rxq->write = (rxq->write + 1) & RX_QUEUE_MASK; rxq->free_count--; @@ -272,7 +326,7 @@ * Increment device's write pointer in multiples of 8. */ if (rxq->write_actual != (rxq->write & ~0x7)) { spin_lock(&rxq->lock); - iwl_pcie_rxq_inc_wr_ptr(trans); + iwl_pcie_rxq_inc_wr_ptr(trans, rxq); spin_unlock(&rxq->lock); } } @@ -285,13 +339,9 @@ static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans, gfp_t priority) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_rxq *rxq = &trans_pcie->rxq; struct page *page; gfp_t gfp_mask = priority; - if (rxq->free_count > RX_LOW_WATERMARK) - gfp_mask |= __GFP_NOWARN; - if (trans_pcie->rx_page_order > 0) gfp_mask |= __GFP_COMP; @@ -301,16 +351,13 @@ static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans, if (net_ratelimit()) IWL_DEBUG_INFO(trans, "alloc_pages failed, order: %d\n", trans_pcie->rx_page_order); - /* Issue an error if the hardware has consumed more than half - * of its free buffer list and we don't have enough - * pre-allocated buffers. + /* + * Issue an error if we don't have enough pre-allocated + * buffers. */ - if (rxq->free_count <= RX_LOW_WATERMARK && - iwl_rxq_space(rxq) > (RX_QUEUE_SIZE / 2) && - net_ratelimit()) + if (!(gfp_mask & __GFP_NOWARN) && net_ratelimit()) IWL_CRIT(trans, - "Failed to alloc_pages with GFP_KERNEL.
Only %u free buffers remaining.\n", - rxq->free_count); + "Failed to alloc_pages\n"); return NULL; } return page; @@ -325,10 +372,10 @@ static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans, * iwl_pcie_rxq_restock. The latter function will update the HW to use the newly * allocated buffers. */ -static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority) +static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority, + struct iwl_rxq *rxq) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_rxq *rxq = &trans_pcie->rxq; struct iwl_rx_mem_buffer *rxb; struct page *page; @@ -372,10 +419,6 @@ static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority) __free_pages(page, trans_pcie->rx_page_order); return; } - /* dma address must be no more than 36 bits */ - BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36)); - /* and also 256 byte aligned! */ - BUG_ON(rxb->page_dma & DMA_BIT_MASK(8)); spin_lock(&rxq->lock); @@ -386,41 +429,24 @@ static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority) } } -static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans) +static void iwl_pcie_free_rbs_pool(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_rxq *rxq = &trans_pcie->rxq; int i; - lockdep_assert_held(&rxq->lock); - - for (i = 0; i < RX_QUEUE_SIZE; i++) { - if (!rxq->pool[i].page) + for (i = 0; i < MQ_RX_POOL_SIZE; i++) { + if (!trans_pcie->rx_pool[i].page) continue; - dma_unmap_page(trans->dev, rxq->pool[i].page_dma, + dma_unmap_page(trans->dev, trans_pcie->rx_pool[i].page_dma, PAGE_SIZE << trans_pcie->rx_page_order, DMA_FROM_DEVICE); - __free_pages(rxq->pool[i].page, trans_pcie->rx_page_order); - rxq->pool[i].page = NULL; + __free_pages(trans_pcie->rx_pool[i].page, + trans_pcie->rx_page_order); + trans_pcie->rx_pool[i].page = NULL; } } /* - * iwl_pcie_rx_replenish - Move all used buffers from rx_used to rx_free - * - * When moving to rx_free an page is allocated for the slot. - * - * Also restock the Rx queue via iwl_pcie_rxq_restock. - * This is called only during initialization - */ -static void iwl_pcie_rx_replenish(struct iwl_trans *trans) -{ - iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL); - - iwl_pcie_rxq_restock(trans); -} - -/* * iwl_pcie_rx_allocator - Allocates pages in the background for RX queues * * Allocates for each received request 8 pages @@ -444,6 +470,11 @@ static void iwl_pcie_rx_allocator(struct iwl_trans *trans) while (pending) { int i; struct list_head local_allocated; + gfp_t gfp_mask = GFP_KERNEL; + + /* Do not post a warning if there are only a few requests */ + if (pending < RX_PENDING_WATERMARK) + gfp_mask |= __GFP_NOWARN; INIT_LIST_HEAD(&local_allocated); @@ -463,7 +494,7 @@ static void iwl_pcie_rx_allocator(struct iwl_trans *trans) BUG_ON(rxb->page); /* Alloc a new receive buffer */ - page = iwl_pcie_rx_alloc_page(trans, GFP_KERNEL); + page = iwl_pcie_rx_alloc_page(trans, gfp_mask); if (!page) continue; rxb->page = page; @@ -477,10 +508,6 @@ static void iwl_pcie_rx_allocator(struct iwl_trans *trans) __free_pages(page, trans_pcie->rx_page_order); continue; } - /* dma address must be no more than 36 bits */ - BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36)); - /* and also 256 byte aligned! 
*/ - BUG_ON(rxb->page_dma & DMA_BIT_MASK(8)); /* move the allocated entry to the out list */ list_move(&rxb->list, &local_allocated); @@ -561,38 +588,83 @@ static void iwl_pcie_rx_allocator_work(struct work_struct *data) static int iwl_pcie_rx_alloc(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_rxq *rxq = &trans_pcie->rxq; struct iwl_rb_allocator *rba = &trans_pcie->rba; struct device *dev = trans->dev; + int i; + int free_size = trans->cfg->mq_rx_supported ? sizeof(__le64) : + sizeof(__le32); + + if (WARN_ON(trans_pcie->rxq)) + return -EINVAL; - memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq)); + trans_pcie->rxq = kcalloc(trans->num_rx_queues, sizeof(struct iwl_rxq), + GFP_KERNEL); + if (!trans_pcie->rxq) + return -ENOMEM; - spin_lock_init(&rxq->lock); spin_lock_init(&rba->lock); - if (WARN_ON(rxq->bd || rxq->rb_stts)) - return -EINVAL; + for (i = 0; i < trans->num_rx_queues; i++) { + struct iwl_rxq *rxq = &trans_pcie->rxq[i]; - /* Allocate the circular buffer of Read Buffer Descriptors (RBDs) */ - rxq->bd = dma_zalloc_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE, - &rxq->bd_dma, GFP_KERNEL); - if (!rxq->bd) - goto err_bd; + spin_lock_init(&rxq->lock); + if (trans->cfg->mq_rx_supported) + rxq->queue_size = MQ_RX_TABLE_SIZE; + else + rxq->queue_size = RX_QUEUE_SIZE; - /*Allocate the driver's pointer to receive buffer status */ - rxq->rb_stts = dma_zalloc_coherent(dev, sizeof(*rxq->rb_stts), - &rxq->rb_stts_dma, GFP_KERNEL); - if (!rxq->rb_stts) - goto err_rb_stts; + /* + * Allocate the circular buffer of Read Buffer Descriptors + * (RBDs) + */ + rxq->bd = dma_zalloc_coherent(dev, + free_size * rxq->queue_size, + &rxq->bd_dma, GFP_KERNEL); + if (!rxq->bd) + goto err; + + if (trans->cfg->mq_rx_supported) { + rxq->used_bd = dma_zalloc_coherent(dev, + sizeof(__le32) * + rxq->queue_size, + &rxq->used_bd_dma, + GFP_KERNEL); + if (!rxq->used_bd) + goto err; + } + /* Allocate the driver's pointer to receive buffer status */ + rxq->rb_stts = dma_zalloc_coherent(dev, sizeof(*rxq->rb_stts), + &rxq->rb_stts_dma, + GFP_KERNEL); + if (!rxq->rb_stts) + goto err; + } return 0; -err_rb_stts: - dma_free_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE, - rxq->bd, rxq->bd_dma); - rxq->bd_dma = 0; - rxq->bd = NULL; -err_bd: +err: + for (i = 0; i < trans->num_rx_queues; i++) { + struct iwl_rxq *rxq = &trans_pcie->rxq[i]; + + if (rxq->bd) + dma_free_coherent(dev, free_size * rxq->queue_size, + rxq->bd, rxq->bd_dma); + rxq->bd_dma = 0; + rxq->bd = NULL; + + if (rxq->rb_stts) + dma_free_coherent(trans->dev, + sizeof(struct iwl_rb_status), + rxq->rb_stts, rxq->rb_stts_dma); + + if (rxq->used_bd) + dma_free_coherent(dev, sizeof(__le32) * rxq->queue_size, + rxq->used_bd, rxq->used_bd_dma); + rxq->used_bd_dma = 0; + rxq->used_bd = NULL; + } + kfree(trans_pcie->rxq); + trans_pcie->rxq = NULL; + return -ENOMEM; }
RFH_RXF_DMA_RB_SIZE_8K; + break; + case IWL_AMSDU_12K: + rb_size = RFH_RXF_DMA_RB_SIZE_12K; + break; + default: + WARN_ON(1); + rb_size = RFH_RXF_DMA_RB_SIZE_4K; + } - for (i = 0; i < RX_QUEUE_SIZE; i++) - list_add(&rxq->pool[i].list, &rxq->rx_used); -} + /* Stop Rx DMA */ + iwl_write_prph(trans, RFH_RXF_DMA_CFG, 0); + /* disable free and used rx queue operation */ + iwl_write_prph(trans, RFH_RXF_RXQ_ACTIVE, 0); + + for (i = 0; i < trans->num_rx_queues; i++) { + /* Tell device where to find RBD free table in DRAM */ + iwl_pcie_write_prph_64(trans, RFH_Q_FRBDCB_BA_LSB(i), + (u64)(trans_pcie->rxq[i].bd_dma)); + /* Tell device where to find RBD used table in DRAM */ + iwl_pcie_write_prph_64(trans, RFH_Q_URBDCB_BA_LSB(i), + (u64)(trans_pcie->rxq[i].used_bd_dma)); + /* Tell device where in DRAM to update its Rx status */ + iwl_pcie_write_prph_64(trans, RFH_Q_URBD_STTS_WPTR_LSB(i), + trans_pcie->rxq[i].rb_stts_dma); + /* Reset device index tables */ + iwl_write_prph(trans, RFH_Q_FRBDCB_WIDX(i), 0); + iwl_write_prph(trans, RFH_Q_FRBDCB_RIDX(i), 0); + iwl_write_prph(trans, RFH_Q_URBDCB_WIDX(i), 0); + + enabled |= BIT(i) | BIT(i + 16); + } -static void iwl_pcie_rx_init_rba(struct iwl_rb_allocator *rba) -{ - int i; + /* restock default queue */ + iwl_pcie_rxq_mq_restock(trans, &trans_pcie->rxq[0]); - lockdep_assert_held(&rba->lock); + /* + * Enable Rx DMA + * Single frame mode + * Rx buffer size 4k, 8k or 12k + * Min RB size 4 or 8 + * 512 RBDs + */ + iwl_write_prph(trans, RFH_RXF_DMA_CFG, + RFH_DMA_EN_ENABLE_VAL | + rb_size | RFH_RXF_DMA_SINGLE_FRAME_MASK | + RFH_RXF_DMA_MIN_RB_4_8 | + RFH_RXF_DMA_RBDCB_SIZE_512); - INIT_LIST_HEAD(&rba->rbd_allocated); - INIT_LIST_HEAD(&rba->rbd_empty); + iwl_write_prph(trans, RFH_GEN_CFG, RFH_GEN_CFG_RFH_DMA_SNOOP | + RFH_GEN_CFG_SERVICE_DMA_SNOOP); + iwl_write_prph(trans, RFH_RXF_RXQ_ACTIVE, enabled); - for (i = 0; i < RX_POOL_SIZE; i++) - list_add(&rba->pool[i].list, &rba->rbd_empty); + /* Set interrupt coalescing timer to default (2048 usecs) */ + iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF); } -static void iwl_pcie_rx_free_rba(struct iwl_trans *trans) +static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq) { - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_rb_allocator *rba = &trans_pcie->rba; - int i; + lockdep_assert_held(&rxq->lock); - lockdep_assert_held(&rba->lock); + INIT_LIST_HEAD(&rxq->rx_free); + INIT_LIST_HEAD(&rxq->rx_used); + rxq->free_count = 0; + rxq->used_count = 0; +} - for (i = 0; i < RX_POOL_SIZE; i++) { - if (!rba->pool[i].page) - continue; - dma_unmap_page(trans->dev, rba->pool[i].page_dma, - PAGE_SIZE << trans_pcie->rx_page_order, - DMA_FROM_DEVICE); - __free_pages(rba->pool[i].page, trans_pcie->rx_page_order); - rba->pool[i].page = NULL; - } +static int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget) +{ + WARN_ON(1); + return 0; } int iwl_pcie_rx_init(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_rxq *rxq = &trans_pcie->rxq; + struct iwl_rxq *def_rxq; struct iwl_rb_allocator *rba = &trans_pcie->rba; - int i, err; + int i, err, num_rbds, allocator_pool_size; - if (!rxq->bd) { + if (!trans_pcie->rxq) { err = iwl_pcie_rx_alloc(trans); if (err) return err; } + def_rxq = trans_pcie->rxq; if (!rba->alloc_wq) rba->alloc_wq = alloc_workqueue("rb_allocator", WQ_HIGHPRI | WQ_UNBOUND, 1); @@ -726,34 +836,68 @@ int iwl_pcie_rx_init(struct iwl_trans *trans) spin_lock(&rba->lock); atomic_set(&rba->req_pending, 0);
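The multi-queue restock and completion paths around these hunks rely on a trick worth spelling out: receive pages are at least 4 KiB aligned, so the low 12 bits of the DMA address placed in a free RBD are guaranteed zero and are reused to carry the buffer's VID, which the hardware later echoes back through the used-descriptor ring for the global_table lookup. A minimal sketch of that pack/unpack arithmetic, illustrative only, with hypothetical names:

    #include <stdint.h>

    #define VID_BITS 12
    #define VID_MASK ((1u << VID_BITS) - 1)	/* same value as DMA_BIT_MASK(12) */

    /* Pack a free RBD: valid only if page_dma is 4 KiB aligned, i.e. its
     * low 12 bits are zero (what the WARN_ON in the restock path checks). */
    static uint64_t rbd_pack(uint64_t page_dma, uint16_t vid)
    {
    	return page_dma | (vid & VID_MASK);
    }

    /* Unpack a used RBD: the hardware echoes the VID back in the low
     * bits, which then indexes the driver's global buffer table. */
    static uint16_t rbd_unpack_vid(uint32_t used_bd)
    {
    	return used_bd & VID_MASK;
    }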
atomic_set(&rba->req_ready, 0); - /* free all first - we might be reconfigured for a different size */ - iwl_pcie_rx_free_rba(trans); - iwl_pcie_rx_init_rba(rba); + INIT_LIST_HEAD(&rba->rbd_allocated); + INIT_LIST_HEAD(&rba->rbd_empty); spin_unlock(&rba->lock); - spin_lock(&rxq->lock); - /* free all first - we might be reconfigured for a different size */ - iwl_pcie_rxq_free_rbs(trans); - iwl_pcie_rx_init_rxb_lists(rxq); + iwl_pcie_free_rbs_pool(trans); for (i = 0; i < RX_QUEUE_SIZE; i++) - rxq->queue[i] = NULL; + def_rxq->queue[i] = NULL; - /* Set us so that we have processed and used all buffers, but have - * not restocked the Rx queue with fresh buffers */ - rxq->read = rxq->write = 0; - rxq->write_actual = 0; - memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts)); - spin_unlock(&rxq->lock); + for (i = 0; i < trans->num_rx_queues; i++) { + struct iwl_rxq *rxq = &trans_pcie->rxq[i]; - iwl_pcie_rx_replenish(trans); + rxq->id = i; - iwl_pcie_rx_hw_init(trans, rxq); + spin_lock(&rxq->lock); + /* + * Set read write pointer to reflect that we have processed + * and used all buffers, but have not restocked the Rx queue + * with fresh buffers + */ + rxq->read = 0; + rxq->write = 0; + rxq->write_actual = 0; + memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts)); - spin_lock(&rxq->lock); - iwl_pcie_rxq_inc_wr_ptr(trans); - spin_unlock(&rxq->lock); + iwl_pcie_rx_init_rxb_lists(rxq); + + if (!rxq->napi.poll) + netif_napi_add(&trans_pcie->napi_dev, &rxq->napi, + iwl_pcie_dummy_napi_poll, 64); + + spin_unlock(&rxq->lock); + } + + /* move the pool to the default queue and allocator ownerships */ + num_rbds = trans->cfg->mq_rx_supported ? + MQ_RX_POOL_SIZE : RX_QUEUE_SIZE; + allocator_pool_size = trans->num_rx_queues * + (RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC); + for (i = 0; i < num_rbds; i++) { + struct iwl_rx_mem_buffer *rxb = &trans_pcie->rx_pool[i]; + + if (i < allocator_pool_size) + list_add(&rxb->list, &rba->rbd_empty); + else + list_add(&rxb->list, &def_rxq->rx_used); + trans_pcie->global_table[i] = rxb; + rxb->vid = (u16)i; + } + + iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL, def_rxq); + if (trans->cfg->mq_rx_supported) { + iwl_pcie_rx_mq_hw_init(trans); + } else { + iwl_pcie_rxq_restock(trans, def_rxq); + iwl_pcie_rx_hw_init(trans, def_rxq); + } + + spin_lock(&def_rxq->lock); + iwl_pcie_rxq_inc_wr_ptr(trans, def_rxq); + spin_unlock(&def_rxq->lock); return 0; } @@ -761,12 +905,16 @@ int iwl_pcie_rx_init(struct iwl_trans *trans) void iwl_pcie_rx_free(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_rxq *rxq = &trans_pcie->rxq; struct iwl_rb_allocator *rba = &trans_pcie->rba; + int free_size = trans->cfg->mq_rx_supported ? 
sizeof(__le64) : + sizeof(__le32); + int i; - /*if rxq->bd is NULL, it means that nothing has been allocated, - * exit now */ - if (!rxq->bd) { + /* + * if rxq is NULL, it means that nothing has been allocated, + * exit now + */ + if (!trans_pcie->rxq) { IWL_DEBUG_INFO(trans, "Free NULL rx context\n"); return; } @@ -777,27 +925,37 @@ void iwl_pcie_rx_free(struct iwl_trans *trans) rba->alloc_wq = NULL; } - spin_lock(&rba->lock); - iwl_pcie_rx_free_rba(trans); - spin_unlock(&rba->lock); - - spin_lock(&rxq->lock); - iwl_pcie_rxq_free_rbs(trans); - spin_unlock(&rxq->lock); - - dma_free_coherent(trans->dev, sizeof(__le32) * RX_QUEUE_SIZE, - rxq->bd, rxq->bd_dma); - rxq->bd_dma = 0; - rxq->bd = NULL; - - if (rxq->rb_stts) - dma_free_coherent(trans->dev, - sizeof(struct iwl_rb_status), - rxq->rb_stts, rxq->rb_stts_dma); - else - IWL_DEBUG_INFO(trans, "Free rxq->rb_stts which is NULL\n"); - rxq->rb_stts_dma = 0; - rxq->rb_stts = NULL; + iwl_pcie_free_rbs_pool(trans); + + for (i = 0; i < trans->num_rx_queues; i++) { + struct iwl_rxq *rxq = &trans_pcie->rxq[i]; + + if (rxq->bd) + dma_free_coherent(trans->dev, + free_size * rxq->queue_size, + rxq->bd, rxq->bd_dma); + rxq->bd_dma = 0; + rxq->bd = NULL; + + if (rxq->rb_stts) + dma_free_coherent(trans->dev, + sizeof(struct iwl_rb_status), + rxq->rb_stts, rxq->rb_stts_dma); + else + IWL_DEBUG_INFO(trans, + "Free rxq->rb_stts which is NULL\n"); + + if (rxq->used_bd) + dma_free_coherent(trans->dev, + sizeof(__le32) * rxq->queue_size, + rxq->used_bd, rxq->used_bd_dma); + rxq->used_bd_dma = 0; + rxq->used_bd = NULL; + + if (rxq->napi.poll) + netif_napi_del(&rxq->napi); + } + kfree(trans_pcie->rxq); } /* @@ -841,11 +999,11 @@ static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans, } static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans, + struct iwl_rxq *rxq, struct iwl_rx_mem_buffer *rxb, bool emergency) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_rxq *rxq = &trans_pcie->rxq; struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue]; bool page_stolen = false; int max_len = PAGE_SIZE << trans_pcie->rx_page_order; @@ -911,7 +1069,12 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans, index = SEQ_TO_INDEX(sequence); cmd_index = get_cmd_index(&txq->q, index); - iwl_op_mode_rx(trans->op_mode, &trans_pcie->napi, &rxcb); + if (rxq->id == 0) + iwl_op_mode_rx(trans->op_mode, &rxq->napi, + &rxcb); + else + iwl_op_mode_rx_rss(trans->op_mode, &rxq->napi, + &rxcb, rxq->id); if (reclaim) { kzfree(txq->entries[cmd_index].free_buf); @@ -975,7 +1138,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans, static void iwl_pcie_rx_handle(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_rxq *rxq = &trans_pcie->rxq; + struct iwl_rxq *rxq = &trans_pcie->rxq[0]; u32 r, i, j, count = 0; bool emergency = false; @@ -993,16 +1156,26 @@ restart: while (i != r) { struct iwl_rx_mem_buffer *rxb; - if (unlikely(rxq->used_count == RX_QUEUE_SIZE / 2)) + if (unlikely(rxq->used_count == rxq->queue_size / 2)) emergency = true; - rxb = rxq->queue[i]; - rxq->queue[i] = NULL; + if (trans->cfg->mq_rx_supported) { + /* + * used_bd entries are 32 bit, but only 12 bits are + * used to retrieve the vid + */ + u16 vid = (u16)le32_to_cpu(rxq->used_bd[i]); + + rxb = trans_pcie->global_table[vid]; + } else { + rxb = rxq->queue[i]; + rxq->queue[i] = NULL; + } IWL_DEBUG_RX(trans, "rxbuf: HW = %d, SW = %d\n", r, i); - iwl_pcie_rx_handle_rb(trans, rxb, emergency); + iwl_pcie_rx_handle_rb(trans,
rxq, rxb, emergency); - i = (i + 1) & RX_QUEUE_MASK; + i = (i + 1) & (rxq->queue_size - 1); /* If we have RX_CLAIM_REQ_ALLOC released rx buffers - * try to claim the pre-allocated buffers from the allocator */ @@ -1040,10 +1213,10 @@ restart: count++; if (count == 8) { count = 0; - if (rxq->used_count < RX_QUEUE_SIZE / 3) + if (rxq->used_count < rxq->queue_size / 3) emergency = false; spin_unlock(&rxq->lock); - iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC); + iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq); spin_lock(&rxq->lock); } } @@ -1055,7 +1228,10 @@ restart: if (rxq->free_count >= RX_CLAIM_REQ_ALLOC) { rxq->read = i; spin_unlock(&rxq->lock); - iwl_pcie_rxq_restock(trans); + if (trans->cfg->mq_rx_supported) + iwl_pcie_rxq_mq_restock(trans, rxq); + else + iwl_pcie_rxq_restock(trans, rxq); goto restart; } } @@ -1077,10 +1253,10 @@ restart: * will be restocked by the next call of iwl_pcie_rxq_restock. */ if (unlikely(emergency && count)) - iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC); + iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq); - if (trans_pcie->napi.poll) - napi_gro_flush(&trans_pcie->napi, false); + if (rxq->napi.poll) + napi_gro_flush(&rxq->napi, false); } /* diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index d60a467a983c..b796952da644 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -72,6 +72,7 @@ #include <linux/bitops.h> #include <linux/gfp.h> #include <linux/vmalloc.h> +#include <linux/pm_runtime.h> #include "iwl-drv.h" #include "iwl-trans.h" @@ -1218,11 +1219,12 @@ void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state) _iwl_trans_pcie_stop_device(trans, true); } -static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test) +static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test, + bool reset) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - if (trans->system_pm_mode == IWL_PLAT_PM_MODE_D0I3) { + if (!reset) { /* Enable persistence mode to avoid reset */ iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, CSR_HW_IF_CONFIG_REG_PERSIST_MODE); @@ -1246,7 +1248,7 @@ static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test) iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE); - if (trans->system_pm_mode == IWL_PLAT_PM_MODE_D3) { + if (reset) { /* * reset TX queues -- some of their registers reset during S3 * so if we don't reset everything here the D3 image would try @@ -1260,7 +1262,7 @@ static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test) static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans, enum iwl_d3_status *status, - bool test) + bool test, bool reset) { u32 val; int ret; @@ -1295,7 +1297,7 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans, iwl_pcie_set_pwr(trans, false); - if (trans->system_pm_mode == IWL_PLAT_PM_MODE_D0I3) { + if (!reset) { iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); } else { @@ -1353,6 +1355,10 @@ static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power) /* ... 
rfkill can call stop_device and set it false if needed */ iwl_trans_pcie_rf_kill(trans, hw_rfkill); + /* Make sure we sync here, because we'll need full access later */ + if (low_power) + pm_runtime_resume(trans->dev); + return 0; } @@ -1422,12 +1428,6 @@ static void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr, iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WDAT, val); } -static int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget) -{ - WARN_ON(1); - return 0; -} - static void iwl_trans_pcie_configure(struct iwl_trans *trans, const struct iwl_trans_config *trans_cfg) { @@ -1464,11 +1464,8 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans, * As this function may be called again in some corner cases don't * do anything if NAPI was already initialized. */ - if (!trans_pcie->napi.poll) { + if (trans_pcie->napi_dev.reg_state != NETREG_DUMMY) init_dummy_netdev(&trans_pcie->napi_dev); - netif_napi_add(&trans_pcie->napi_dev, &trans_pcie->napi, - iwl_pcie_dummy_napi_poll, 64); - } } void iwl_trans_pcie_free(struct iwl_trans *trans) @@ -1476,6 +1473,9 @@ void iwl_trans_pcie_free(struct iwl_trans *trans) struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int i; + /* TODO: check if this is really needed */ + pm_runtime_disable(trans->dev); + synchronize_irq(trans_pcie->pci_dev->irq); iwl_pcie_tx_free(trans); @@ -1489,9 +1489,6 @@ void iwl_trans_pcie_free(struct iwl_trans *trans) pci_release_regions(trans_pcie->pci_dev); pci_disable_device(trans_pcie->pci_dev); - if (trans_pcie->napi.poll) - netif_napi_del(&trans_pcie->napi); - iwl_pcie_free_fw_monitor(trans); for_each_possible_cpu(i) { @@ -1831,6 +1828,7 @@ void iwl_trans_pcie_ref(struct iwl_trans *trans) spin_lock_irqsave(&trans_pcie->ref_lock, flags); IWL_DEBUG_RPM(trans, "ref_counter: %d\n", trans_pcie->ref_count); trans_pcie->ref_count++; + pm_runtime_get(&trans_pcie->pci_dev->dev); spin_unlock_irqrestore(&trans_pcie->ref_lock, flags); } @@ -1849,6 +1847,10 @@ void iwl_trans_pcie_unref(struct iwl_trans *trans) return; } trans_pcie->ref_count--; + + pm_runtime_mark_last_busy(&trans_pcie->pci_dev->dev); + pm_runtime_put_autosuspend(&trans_pcie->pci_dev->dev); + spin_unlock_irqrestore(&trans_pcie->ref_lock, flags); } @@ -2001,29 +2003,48 @@ static ssize_t iwl_dbgfs_rx_queue_read(struct file *file, { struct iwl_trans *trans = file->private_data; struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_rxq *rxq = &trans_pcie->rxq; - char buf[256]; - int pos = 0; - const size_t bufsz = sizeof(buf); - - pos += scnprintf(buf + pos, bufsz - pos, "read: %u\n", - rxq->read); - pos += scnprintf(buf + pos, bufsz - pos, "write: %u\n", - rxq->write); - pos += scnprintf(buf + pos, bufsz - pos, "write_actual: %u\n", - rxq->write_actual); - pos += scnprintf(buf + pos, bufsz - pos, "need_update: %d\n", - rxq->need_update); - pos += scnprintf(buf + pos, bufsz - pos, "free_count: %u\n", - rxq->free_count); - if (rxq->rb_stts) { - pos += scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n", - le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF); - } else { - pos += scnprintf(buf + pos, bufsz - pos, - "closed_rb_num: Not Allocated\n"); + char *buf; + int pos = 0, i, ret; + size_t bufsz = sizeof(char) * 121 * trans->num_rx_queues; + + if (!trans_pcie->rxq) + return -EAGAIN; + + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + for (i = 0; i < trans->num_rx_queues && pos < bufsz; i++) { + struct iwl_rxq *rxq = &trans_pcie->rxq[i]; + + pos +=
scnprintf(buf + pos, bufsz - pos, "queue#: %2d\n", + i); + pos += scnprintf(buf + pos, bufsz - pos, "\tread: %u\n", + rxq->read); + pos += scnprintf(buf + pos, bufsz - pos, "\twrite: %u\n", + rxq->write); + pos += scnprintf(buf + pos, bufsz - pos, "\twrite_actual: %u\n", + rxq->write_actual); + pos += scnprintf(buf + pos, bufsz - pos, "\tneed_update: %2d\n", + rxq->need_update); + pos += scnprintf(buf + pos, bufsz - pos, "\tfree_count: %u\n", + rxq->free_count); + if (rxq->rb_stts) { + pos += scnprintf(buf + pos, bufsz - pos, + "\tclosed_rb_num: %u\n", + le16_to_cpu(rxq->rb_stts->closed_rb_num) & + 0x0FFF); + } else { + pos += scnprintf(buf + pos, bufsz - pos, + "\tclosed_rb_num: Not Allocated\n"); + } } - return simple_read_from_buffer(user_buf, count, ppos, buf, pos); + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + + return ret; } static ssize_t iwl_dbgfs_interrupt_read(struct file *file, @@ -2188,7 +2209,8 @@ static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans, { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int max_len = PAGE_SIZE << trans_pcie->rx_page_order; - struct iwl_rxq *rxq = &trans_pcie->rxq; + /* Dump RBs is supported only for pre-9000 devices (1 queue) */ + struct iwl_rxq *rxq = &trans_pcie->rxq[0]; u32 i, r, j, rb_len = 0; spin_lock(&rxq->lock); @@ -2383,7 +2405,8 @@ static struct iwl_trans_dump_data u32 len, num_rbs; u32 monitor_len; int i, ptr; - bool dump_rbs = test_bit(STATUS_FW_ERROR, &trans->status); + bool dump_rbs = test_bit(STATUS_FW_ERROR, &trans->status) && + !trans->cfg->mq_rx_supported; /* transport dump header */ len = sizeof(*dump_data); @@ -2438,11 +2461,12 @@ static struct iwl_trans_dump_data len += sizeof(*data) + (FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND); if (dump_rbs) { + /* Dump RBs is supported only for pre-9000 devices (1 queue) */ + struct iwl_rxq *rxq = &trans_pcie->rxq[0]; /* RBs */ - num_rbs = le16_to_cpu(ACCESS_ONCE( - trans_pcie->rxq.rb_stts->closed_rb_num)) + num_rbs = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF; - num_rbs = (num_rbs - trans_pcie->rxq.read) & RX_QUEUE_MASK; + num_rbs = (num_rbs - rxq->read) & RX_QUEUE_MASK; len += num_rbs * (sizeof(*data) + sizeof(struct iwl_fw_error_dump_rb) + (PAGE_SIZE << trans_pcie->rx_page_order)); @@ -2493,6 +2517,22 @@ static struct iwl_trans_dump_data return dump_data; } +#ifdef CONFIG_PM_SLEEP +static int iwl_trans_pcie_suspend(struct iwl_trans *trans) +{ + if (trans->runtime_pm_mode == IWL_PLAT_PM_MODE_D0I3) + return iwl_pci_fw_enter_d0i3(trans); + + return 0; +} + +static void iwl_trans_pcie_resume(struct iwl_trans *trans) +{ + if (trans->runtime_pm_mode == IWL_PLAT_PM_MODE_D0I3) + iwl_pci_fw_exit_d0i3(trans); +} +#endif /* CONFIG_PM_SLEEP */ + static const struct iwl_trans_ops trans_ops_pcie = { .start_hw = iwl_trans_pcie_start_hw, .op_mode_leave = iwl_trans_pcie_op_mode_leave, @@ -2503,6 +2543,11 @@ static const struct iwl_trans_ops trans_ops_pcie = { .d3_suspend = iwl_trans_pcie_d3_suspend, .d3_resume = iwl_trans_pcie_d3_resume, +#ifdef CONFIG_PM_SLEEP + .suspend = iwl_trans_pcie_suspend, + .resume = iwl_trans_pcie_resume, +#endif /* CONFIG_PM_SLEEP */ + .send_cmd = iwl_trans_pcie_send_hcmd, .tx = iwl_trans_pcie_tx, @@ -2541,7 +2586,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, struct iwl_trans_pcie *trans_pcie; struct iwl_trans *trans; u16 pci_cmd; - int ret; + int ret, addr_size; trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie), &pdev->dev, cfg, &trans_ops_pcie, 0); @@ -2579,11 +2624,17 @@ 
struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, PCIE_LINK_STATE_CLKPM); } + if (cfg->mq_rx_supported) + addr_size = 64; + else + addr_size = 36; + pci_set_master(pdev); - ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(36)); + ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(addr_size)); if (!ret) - ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36)); + ret = pci_set_consistent_dma_mask(pdev, + DMA_BIT_MASK(addr_size)); if (ret) { ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); if (!ret) @@ -2686,6 +2737,8 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, /* Initialize the wait queue for commands */ init_waitqueue_head(&trans_pcie->wait_command_queue); + init_waitqueue_head(&trans_pcie->d0i3_waitq); + ret = iwl_pcie_alloc_ict(trans); if (ret) goto out_pci_disable_msi; @@ -2700,6 +2753,12 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, trans_pcie->inta_mask = CSR_INI_SET_MASK; +#ifdef CONFIG_IWLWIFI_PCIE_RTPM + trans->runtime_pm_mode = IWL_PLAT_PM_MODE_D0I3; +#else + trans->runtime_pm_mode = IWL_PLAT_PM_MODE_DISABLED; +#endif /* CONFIG_IWLWIFI_PCIE_RTPM */ + return trans; out_free_ict: diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c index 5262028b5505..837a7d536874 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c @@ -1,7 +1,8 @@ /****************************************************************************** * * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 Intel Deutschland GmbH * * Portions of this file are derived from the ipw3945 project, as well * as portions of the ieee80211 subsystem header files. @@ -33,7 +34,6 @@ #include <linux/sched.h> #include <net/ip6_checksum.h> #include <net/tso.h> -#include <net/ip6_checksum.h> #include "iwl-debug.h" #include "iwl-csr.h" @@ -571,6 +571,7 @@ static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, return ret; spin_lock_init(&txq->lock); + __skb_queue_head_init(&txq->overflow_q); /* * Tell nic where to find circular buffer of Tx Frame Descriptors for @@ -621,6 +622,13 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id) q->read_ptr = iwl_queue_inc_wrap(q->read_ptr); } txq->active = false; + + while (!skb_queue_empty(&txq->overflow_q)) { + struct sk_buff *skb = __skb_dequeue(&txq->overflow_q); + + iwl_op_mode_free_skb(trans->op_mode, skb); + } + spin_unlock_bh(&txq->lock); /* just in case - this queue may have been stopped */ @@ -1052,8 +1060,41 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn, iwl_pcie_txq_progress(txq); - if (iwl_queue_space(&txq->q) > txq->q.low_mark) - iwl_wake_queue(trans, txq); + if (iwl_queue_space(&txq->q) > txq->q.low_mark && + test_bit(txq_id, trans_pcie->queue_stopped)) { + struct sk_buff_head skbs; + + __skb_queue_head_init(&skbs); + skb_queue_splice_init(&txq->overflow_q, &skbs); + + /* + * This is tricky: we are in the reclaim path, which is non + * re-entrant, so no one will try to access the txq data + * from that path. We stopped tx, so we can't have tx as well. + * Bottom line, we can unlock and re-lock + * later.
+ */ + spin_unlock_bh(&txq->lock); + + while (!skb_queue_empty(&skbs)) { + struct sk_buff *skb = __skb_dequeue(&skbs); + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + u8 dev_cmd_idx = IWL_TRANS_FIRST_DRIVER_DATA + 1; + struct iwl_device_cmd *dev_cmd = + info->driver_data[dev_cmd_idx]; + + /* + * Note that we can very well be overflowing again. + * In that case, iwl_queue_space will be small again + * and we won't wake mac80211's queue. + */ + iwl_trans_pcie_tx(trans, skb, dev_cmd, txq_id); + } + spin_lock_bh(&txq->lock); + + if (iwl_queue_space(&txq->q) > txq->q.low_mark) + iwl_wake_queue(trans, txq); + } if (q->read_ptr == q->write_ptr) { IWL_DEBUG_RPM(trans, "Q %d - last tx reclaimed\n", q->id); @@ -1686,6 +1727,20 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans, wake_up(&trans_pcie->wait_command_queue); } + if (meta->flags & CMD_MAKE_TRANS_IDLE) { + IWL_DEBUG_INFO(trans, "complete %s - mark trans as idle\n", + iwl_get_cmd_string(trans, cmd->hdr.cmd)); + set_bit(STATUS_TRANS_IDLE, &trans->status); + wake_up(&trans_pcie->d0i3_waitq); + } + + if (meta->flags & CMD_WAKE_UP_TRANS) { + IWL_DEBUG_INFO(trans, "complete %s - clear trans idle flag\n", + iwl_get_cmd_string(trans, cmd->hdr.cmd)); + clear_bit(STATUS_TRANS_IDLE, &trans->status); + wake_up(&trans_pcie->d0i3_waitq); + } + meta->flags = 0; spin_unlock_bh(&txq->lock); @@ -2161,6 +2216,8 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, csum = skb_checksum(skb, offs, skb->len - offs, 0); *(__sum16 *)(skb->data + csum_offs) = csum_fold(csum); + + skb->ip_summed = CHECKSUM_UNNECESSARY; } if (skb_is_nonlinear(skb) && @@ -2177,6 +2234,22 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, spin_lock(&txq->lock); + if (iwl_queue_space(q) < q->high_mark) { + iwl_stop_queue(trans, txq); + + /* don't put the packet on the ring, if there is no room */ + if (unlikely(iwl_queue_space(q) < 3)) { + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + + info->driver_data[IWL_TRANS_FIRST_DRIVER_DATA + 1] = + dev_cmd; + __skb_queue_tail(&txq->overflow_q, skb); + + spin_unlock(&txq->lock); + return 0; + } + } + /* In AGG mode, the index in the ring must correspond to the WiFi * sequence number. This is a HW requirements to help the SCD to parse * the BA. @@ -2281,12 +2354,6 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, * At this point the frame is "transmitted" successfully * and we will get a TX status notification eventually. 
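Taken together, the reclaim hunk above and the enqueue hunk in iwl_trans_pcie_tx() implement one pattern: when the ring drops below high_mark the driver stops the mac80211 queue, and when it is critically full it parks the frame (with its attached device command) on overflow_q and reports success; the reclaim path later splices the parked frames off, replays them, and only wakes the upper queue once there is real room. A condensed sketch of the pattern with hypothetical ring helpers (the sk_buff queue calls are the real kernel API):

    /* Enqueue side: called with the ring lock held. */
    static bool ring_try_enqueue(struct ring *r, struct sk_buff *skb)
    {
    	if (ring_space(r) < r->high_mark)
    		stop_upper_queue(r);		/* throttle the stack */

    	if (ring_space(r) < RING_MIN_SLOTS) {
    		/* No room at all: park the frame and pretend success;
    		 * it is replayed from the reclaim path later. */
    		__skb_queue_tail(&r->overflow_q, skb);
    		return true;
    	}

    	return ring_place_frame(r, skb);	/* normal hot path */
    }

    /* Reclaim side: replay parked frames before waking the stack. */
    static void ring_reclaim_done(struct ring *r)
    {
    	struct sk_buff_head replay;
    	struct sk_buff *skb;

    	__skb_queue_head_init(&replay);
    	skb_queue_splice_init(&r->overflow_q, &replay);

    	/* Replaying may overflow again; that just re-parks the frame. */
    	while ((skb = __skb_dequeue(&replay)))
    		ring_try_enqueue(r, skb);

    	if (ring_space(r) > r->low_mark)
    		wake_upper_queue(r);
    }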
*/ - if (iwl_queue_space(q) < q->high_mark) { - if (wait_write_ptr) - iwl_pcie_txq_inc_wr_ptr(trans, txq); - else - iwl_stop_queue(trans, txq); - } spin_unlock(&txq->lock); return 0; out_err: diff --git a/drivers/net/wireless/intersil/hostap/hostap_hw.c b/drivers/net/wireless/intersil/hostap/hostap_hw.c index 6df3ee561d52..515aa3f993f3 100644 --- a/drivers/net/wireless/intersil/hostap/hostap_hw.c +++ b/drivers/net/wireless/intersil/hostap/hostap_hw.c @@ -836,25 +836,30 @@ static int hfa384x_get_rid(struct net_device *dev, u16 rid, void *buf, int len, spin_lock_bh(&local->baplock); res = hfa384x_setup_bap(dev, BAP0, rid, 0); - if (!res) - res = hfa384x_from_bap(dev, BAP0, &rec, sizeof(rec)); + if (res) + goto unlock; + + res = hfa384x_from_bap(dev, BAP0, &rec, sizeof(rec)); + if (res) + goto unlock; if (le16_to_cpu(rec.len) == 0) { /* RID not available */ res = -ENODATA; + goto unlock; } rlen = (le16_to_cpu(rec.len) - 1) * 2; - if (!res && exact_len && rlen != len) { + if (exact_len && rlen != len) { printk(KERN_DEBUG "%s: hfa384x_get_rid - RID len mismatch: " "rid=0x%04x, len=%d (expected %d)\n", dev->name, rid, rlen, len); res = -ENODATA; + goto unlock; } - if (!res) - res = hfa384x_from_bap(dev, BAP0, buf, len); + res = hfa384x_from_bap(dev, BAP0, buf, len); +unlock: spin_unlock_bh(&local->baplock); mutex_unlock(&local->rid_bap_mtx); diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index c32889a1e39c..a28414c50edf 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -991,7 +991,8 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw, goto nla_put_failure; } - if (nla_put(skb, HWSIM_ATTR_ADDR_TRANSMITTER, ETH_ALEN, hdr->addr2)) + if (nla_put(skb, HWSIM_ATTR_ADDR_TRANSMITTER, + ETH_ALEN, data->addresses[1].addr)) goto nla_put_failure; /* We get the skb->data */ @@ -2736,7 +2737,7 @@ static struct mac80211_hwsim_data *get_hwsim_data_ref_from_addr(const u8 *addr) spin_lock_bh(&hwsim_radio_lock); list_for_each_entry(data, &hwsim_radios, list) { - if (mac80211_hwsim_addr_match(data, addr)) { + if (memcmp(data->addresses[1].addr, addr, ETH_ALEN) == 0) { _found = true; break; } diff --git a/drivers/net/wireless/marvell/libertas/cfg.c b/drivers/net/wireless/marvell/libertas/cfg.c index 86955c416b30..2eea76a340b7 100644 --- a/drivers/net/wireless/marvell/libertas/cfg.c +++ b/drivers/net/wireless/marvell/libertas/cfg.c @@ -2039,6 +2039,43 @@ static int lbs_leave_ibss(struct wiphy *wiphy, struct net_device *dev) +int lbs_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev, + bool enabled, int timeout) +{ + struct lbs_private *priv = wiphy_priv(wiphy); + + if (!(priv->fwcapinfo & FW_CAPINFO_PS)) { + if (!enabled) + return 0; + else + return -EINVAL; + } + /* the firmware does not work well with long latencies when power + * saving is enabled, so do not enable it if there is only polling, no + * interrupts (as in some sdio hosts which can only + * poll for sdio irqs) + */ + if (priv->is_polling) { + if (!enabled) + return 0; + else + return -EINVAL; + } + if (!enabled) { + priv->psmode = LBS802_11POWERMODECAM; + if (priv->psstate != PS_STATE_FULL_POWER) + lbs_set_ps_mode(priv, + PS_MODE_ACTION_EXIT_PS, + true); + return 0; + } + if (priv->psmode != LBS802_11POWERMODECAM) + return 0; + priv->psmode = LBS802_11POWERMODEMAX_PSP; + if (priv->connect_status == LBS_CONNECTED) + lbs_set_ps_mode(priv, PS_MODE_ACTION_ENTER_PS, true); + return 0; +} /* * Initialization @@ -2057,6 +2094,7 @@ static struct cfg80211_ops
lbs_cfg80211_ops = { .change_virtual_intf = lbs_change_intf, .join_ibss = lbs_join_ibss, .leave_ibss = lbs_leave_ibss, + .set_power_mgmt = lbs_set_power_mgmt, }; diff --git a/drivers/net/wireless/marvell/libertas/cmd.c b/drivers/net/wireless/marvell/libertas/cmd.c index 0387a5b380c8..4ddd0e5a6b85 100644 --- a/drivers/net/wireless/marvell/libertas/cmd.c +++ b/drivers/net/wireless/marvell/libertas/cmd.c @@ -957,7 +957,7 @@ static void lbs_queue_cmd(struct lbs_private *priv, /* Exit_PS command needs to be queued in the header always. */ if (le16_to_cpu(cmdnode->cmdbuf->command) == CMD_802_11_PS_MODE) { - struct cmd_ds_802_11_ps_mode *psm = (void *) &cmdnode->cmdbuf; + struct cmd_ds_802_11_ps_mode *psm = (void *)cmdnode->cmdbuf; if (psm->action == cpu_to_le16(PS_MODE_ACTION_EXIT_PS)) { if (priv->psstate != PS_STATE_FULL_POWER) @@ -1387,7 +1387,7 @@ int lbs_execute_next_command(struct lbs_private *priv) * PS command. Ignore it if it is not Exit_PS. * otherwise send it down immediately. */ - struct cmd_ds_802_11_ps_mode *psm = (void *)&cmd[1]; + struct cmd_ds_802_11_ps_mode *psm = (void *)cmd; lbs_deb_host( "EXEC_NEXT_CMD: PS cmd, action 0x%02x\n", @@ -1428,40 +1428,14 @@ int lbs_execute_next_command(struct lbs_private *priv) * check if in power save mode, if yes, put the device back * to PS mode */ -#ifdef TODO - /* - * This was the old code for libertas+wext. Someone that - * understands this beast should re-code it in a sane way. - * - * I actually don't understand why this is related to WPA - * and to connection status, shouldn't powering should be - * independ of such things? - */ if ((priv->psmode != LBS802_11POWERMODECAM) && (priv->psstate == PS_STATE_FULL_POWER) && - ((priv->connect_status == LBS_CONNECTED) || - lbs_mesh_connected(priv))) { - if (priv->secinfo.WPAenabled || - priv->secinfo.WPA2enabled) { - /* check for valid WPA group keys */ - if (priv->wpa_mcast_key.len || - priv->wpa_unicast_key.len) { - lbs_deb_host( - "EXEC_NEXT_CMD: WPA enabled and GTK_SET" - " go back to PS_SLEEP"); - lbs_set_ps_mode(priv, - PS_MODE_ACTION_ENTER_PS, - false); - } - } else { - lbs_deb_host( - "EXEC_NEXT_CMD: cmdpendingq empty, " - "go back to PS_SLEEP"); - lbs_set_ps_mode(priv, PS_MODE_ACTION_ENTER_PS, - false); - } + (priv->connect_status == LBS_CONNECTED)) { + lbs_deb_host( + "EXEC_NEXT_CMD: cmdpendingq empty, go back to PS_SLEEP"); + lbs_set_ps_mode(priv, PS_MODE_ACTION_ENTER_PS, + false); } -#endif } ret = 0; diff --git a/drivers/net/wireless/marvell/libertas/cmdresp.c b/drivers/net/wireless/marvell/libertas/cmdresp.c index e5442e8956f7..c95bf6dc9522 100644 --- a/drivers/net/wireless/marvell/libertas/cmdresp.c +++ b/drivers/net/wireless/marvell/libertas/cmdresp.c @@ -123,7 +123,10 @@ int lbs_process_command_response(struct lbs_private *priv, u8 *data, u32 len) priv->cmd_timed_out = 0; if (respcmd == CMD_RET(CMD_802_11_PS_MODE)) { - struct cmd_ds_802_11_ps_mode *psmode = (void *) &resp[1]; + /* struct cmd_ds_802_11_ps_mode also contains + * the header + */ + struct cmd_ds_802_11_ps_mode *psmode = (void *)resp; u16 action = le16_to_cpu(psmode->action); lbs_deb_host( @@ -254,6 +257,10 @@ int lbs_process_event(struct lbs_private *priv, u32 event) "EVENT: in FULL POWER mode, ignoring PS_SLEEP\n"); break; } + if (!list_empty(&priv->cmdpendingq)) { + lbs_deb_cmd("EVENT: commands in queue, do not sleep\n"); + break; + } priv->psstate = PS_STATE_PRE_SLEEP; lbs_ps_confirm_sleep(priv); diff --git a/drivers/net/wireless/marvell/libertas/debugfs.c b/drivers/net/wireless/marvell/libertas/debugfs.c index 
26cbf1dcc662..faed1823c58e 100644 --- a/drivers/net/wireless/marvell/libertas/debugfs.c +++ b/drivers/net/wireless/marvell/libertas/debugfs.c @@ -56,19 +56,15 @@ static ssize_t lbs_sleepparams_write(struct file *file, loff_t *ppos) { struct lbs_private *priv = file->private_data; - ssize_t buf_size, ret; + ssize_t ret; struct sleep_params sp; int p1, p2, p3, p4, p5, p6; - unsigned long addr = get_zeroed_page(GFP_KERNEL); - char *buf = (char *)addr; - if (!buf) - return -ENOMEM; + char *buf; + + buf = memdup_user_nul(user_buf, min(count, len - 1)); + if (IS_ERR(buf)) + return PTR_ERR(buf); - buf_size = min(count, len - 1); - if (copy_from_user(buf, user_buf, buf_size)) { - ret = -EFAULT; - goto out_unlock; - } ret = sscanf(buf, "%d %d %d %d %d %d", &p1, &p2, &p3, &p4, &p5, &p6); if (ret != 6) { ret = -EINVAL; @@ -88,7 +84,7 @@ static ssize_t lbs_sleepparams_write(struct file *file, ret = -EINVAL; out_unlock: - free_page(addr); + kfree(buf); return ret; } @@ -125,18 +121,14 @@ static ssize_t lbs_host_sleep_write(struct file *file, loff_t *ppos) { struct lbs_private *priv = file->private_data; - ssize_t buf_size, ret; + ssize_t ret; int host_sleep; - unsigned long addr = get_zeroed_page(GFP_KERNEL); - char *buf = (char *)addr; - if (!buf) - return -ENOMEM; + char *buf; + + buf = memdup_user_nul(user_buf, min(count, len - 1)); + if (IS_ERR(buf)) + return PTR_ERR(buf); - buf_size = min(count, len - 1); - if (copy_from_user(buf, user_buf, buf_size)) { - ret = -EFAULT; - goto out_unlock; - } ret = sscanf(buf, "%d", &host_sleep); if (ret != 1) { ret = -EINVAL; @@ -162,7 +154,7 @@ static ssize_t lbs_host_sleep_write(struct file *file, ret = count; out_unlock: - free_page(addr); + kfree(buf); return ret; } @@ -281,21 +273,15 @@ static ssize_t lbs_threshold_write(uint16_t tlv_type, uint16_t event_mask, struct cmd_ds_802_11_subscribe_event *events; struct mrvl_ie_thresholds *tlv; struct lbs_private *priv = file->private_data; - ssize_t buf_size; int value, freq, new_mask; uint16_t curr_mask; char *buf; int ret; - buf = (char *)get_zeroed_page(GFP_KERNEL); - if (!buf) - return -ENOMEM; + buf = memdup_user_nul(userbuf, min(count, len - 1)); + if (IS_ERR(buf)) + return PTR_ERR(buf); - buf_size = min(count, len - 1); - if (copy_from_user(buf, userbuf, buf_size)) { - ret = -EFAULT; - goto out_page; - } ret = sscanf(buf, "%d %d %d", &value, &freq, &new_mask); if (ret != 3) { ret = -EINVAL; @@ -343,7 +329,7 @@ static ssize_t lbs_threshold_write(uint16_t tlv_type, uint16_t event_mask, out_events: kfree(events); out_page: - free_page((unsigned long)buf); + kfree(buf); return ret; } @@ -472,22 +458,15 @@ static ssize_t lbs_rdmac_write(struct file *file, size_t count, loff_t *ppos) { struct lbs_private *priv = file->private_data; - ssize_t res, buf_size; - unsigned long addr = get_zeroed_page(GFP_KERNEL); - char *buf = (char *)addr; - if (!buf) - return -ENOMEM; + char *buf; + + buf = memdup_user_nul(userbuf, min(count, len - 1)); + if (IS_ERR(buf)) + return PTR_ERR(buf); - buf_size = min(count, len - 1); - if (copy_from_user(buf, userbuf, buf_size)) { - res = -EFAULT; - goto out_unlock; - } priv->mac_offset = simple_strtoul(buf, NULL, 16); - res = count; -out_unlock: - free_page(addr); - return res; + kfree(buf); + return count; } static ssize_t lbs_wrmac_write(struct file *file, @@ -496,18 +475,14 @@ static ssize_t lbs_wrmac_write(struct file *file, { struct lbs_private *priv = file->private_data; - ssize_t res, buf_size; + ssize_t res; u32 offset, value; - unsigned long addr = get_zeroed_page(GFP_KERNEL); - 
char *buf = (char *)addr; - if (!buf) - return -ENOMEM; + char *buf; + + buf = memdup_user_nul(userbuf, min(count, len - 1)); + if (IS_ERR(buf)) + return PTR_ERR(buf); - buf_size = min(count, len - 1); - if (copy_from_user(buf, userbuf, buf_size)) { - res = -EFAULT; - goto out_unlock; - } res = sscanf(buf, "%x %x", &offset, &value); if (res != 2) { res = -EFAULT; @@ -520,7 +495,7 @@ static ssize_t lbs_wrmac_write(struct file *file, if (!res) res = count; out_unlock: - free_page(addr); + kfree(buf); return res; } @@ -554,22 +529,16 @@ static ssize_t lbs_rdbbp_write(struct file *file, size_t count, loff_t *ppos) { struct lbs_private *priv = file->private_data; - ssize_t res, buf_size; - unsigned long addr = get_zeroed_page(GFP_KERNEL); - char *buf = (char *)addr; - if (!buf) - return -ENOMEM; + char *buf; + + buf = memdup_user_nul(userbuf, min(count, len - 1)); + if (IS_ERR(buf)) + return PTR_ERR(buf); - buf_size = min(count, len - 1); - if (copy_from_user(buf, userbuf, buf_size)) { - res = -EFAULT; - goto out_unlock; - } priv->bbp_offset = simple_strtoul(buf, NULL, 16); - res = count; -out_unlock: - free_page(addr); - return res; + kfree(buf); + + return count; } static ssize_t lbs_wrbbp_write(struct file *file, @@ -578,18 +547,14 @@ static ssize_t lbs_wrbbp_write(struct file *file, { struct lbs_private *priv = file->private_data; - ssize_t res, buf_size; + ssize_t res; u32 offset, value; - unsigned long addr = get_zeroed_page(GFP_KERNEL); - char *buf = (char *)addr; - if (!buf) - return -ENOMEM; + char *buf; + + buf = memdup_user_nul(userbuf, min(count, len - 1)); + if (IS_ERR(buf)) + return PTR_ERR(buf); - buf_size = min(count, len - 1); - if (copy_from_user(buf, userbuf, buf_size)) { - res = -EFAULT; - goto out_unlock; - } res = sscanf(buf, "%x %x", &offset, &value); if (res != 2) { res = -EFAULT; @@ -602,7 +567,7 @@ static ssize_t lbs_wrbbp_write(struct file *file, if (!res) res = count; out_unlock: - free_page(addr); + kfree(buf); return res; } @@ -636,22 +601,15 @@ static ssize_t lbs_rdrf_write(struct file *file, size_t count, loff_t *ppos) { struct lbs_private *priv = file->private_data; - ssize_t res, buf_size; - unsigned long addr = get_zeroed_page(GFP_KERNEL); - char *buf = (char *)addr; - if (!buf) - return -ENOMEM; + char *buf; + + buf = memdup_user_nul(userbuf, min(count, len - 1)); + if (IS_ERR(buf)) + return PTR_ERR(buf); - buf_size = min(count, len - 1); - if (copy_from_user(buf, userbuf, buf_size)) { - res = -EFAULT; - goto out_unlock; - } priv->rf_offset = simple_strtoul(buf, NULL, 16); - res = count; -out_unlock: - free_page(addr); - return res; + kfree(buf); + return count; } static ssize_t lbs_wrrf_write(struct file *file, @@ -660,18 +618,14 @@ static ssize_t lbs_wrrf_write(struct file *file, { struct lbs_private *priv = file->private_data; - ssize_t res, buf_size; + ssize_t res; u32 offset, value; - unsigned long addr = get_zeroed_page(GFP_KERNEL); - char *buf = (char *)addr; - if (!buf) - return -ENOMEM; + char *buf; + + buf = memdup_user_nul(userbuf, min(count, len - 1)); + if (IS_ERR(buf)) + return PTR_ERR(buf); - buf_size = min(count, len - 1); - if (copy_from_user(buf, userbuf, buf_size)) { - res = -EFAULT; - goto out_unlock; - } res = sscanf(buf, "%x %x", &offset, &value); if (res != 2) { res = -EFAULT; @@ -684,7 +638,7 @@ static ssize_t lbs_wrrf_write(struct file *file, if (!res) res = count; out_unlock: - free_page(addr); + kfree(buf); return res; } @@ -915,16 +869,9 @@ static ssize_t lbs_debugfs_write(struct file *f, const char __user *buf, if (cnt == 0) 
return 0; - pdata = kmalloc(cnt + 1, GFP_KERNEL); - if (pdata == NULL) - return 0; - - if (copy_from_user(pdata, buf, cnt)) { - lbs_deb_debugfs("Copy from user failed\n"); - kfree(pdata); - return 0; - } - pdata[cnt] = '\0'; + pdata = memdup_user_nul(buf, cnt); + if (IS_ERR(pdata)) + return PTR_ERR(pdata); p0 = pdata; for (i = 0; i < num_of_items; i++) { diff --git a/drivers/net/wireless/marvell/libertas/dev.h b/drivers/net/wireless/marvell/libertas/dev.h index 6bd1608992b0..edf710bc5e77 100644 --- a/drivers/net/wireless/marvell/libertas/dev.h +++ b/drivers/net/wireless/marvell/libertas/dev.h @@ -99,6 +99,7 @@ struct lbs_private { /* Hardware access */ void *card; bool iface_running; + u8 is_polling; /* host has to poll the card irq */ u8 fw_ready; u8 surpriseremoved; u8 setup_fw_on_resume; diff --git a/drivers/net/wireless/marvell/libertas/if_sdio.c b/drivers/net/wireless/marvell/libertas/if_sdio.c index 68fd3a9779bd..13eae9ff8c35 100644 --- a/drivers/net/wireless/marvell/libertas/if_sdio.c +++ b/drivers/net/wireless/marvell/libertas/if_sdio.c @@ -1267,7 +1267,7 @@ static int if_sdio_probe(struct sdio_func *func, priv->reset_card = if_sdio_reset_card; priv->power_save = if_sdio_power_save; priv->power_restore = if_sdio_power_restore; - + priv->is_polling = !(func->card->host->caps & MMC_CAP_SDIO_IRQ); ret = if_sdio_power_on(card); if (ret) goto err_activate_card; diff --git a/drivers/net/wireless/marvell/libertas/if_usb.c b/drivers/net/wireless/marvell/libertas/if_usb.c index dff08a2896a3..aba0c9995b14 100644 --- a/drivers/net/wireless/marvell/libertas/if_usb.c +++ b/drivers/net/wireless/marvell/libertas/if_usb.c @@ -267,6 +267,7 @@ static int if_usb_probe(struct usb_interface *intf, priv->enter_deep_sleep = NULL; priv->exit_deep_sleep = NULL; priv->reset_deep_sleep_wakeup = NULL; + priv->is_polling = false; #ifdef CONFIG_OLPC if (machine_is_olpc()) priv->reset_card = if_usb_reset_olpc_card; diff --git a/drivers/net/wireless/marvell/libertas/main.c b/drivers/net/wireless/marvell/libertas/main.c index 8079560f4965..b35b8bcce24c 100644 --- a/drivers/net/wireless/marvell/libertas/main.c +++ b/drivers/net/wireless/marvell/libertas/main.c @@ -1060,7 +1060,12 @@ void lbs_remove_card(struct lbs_private *priv) if (priv->psmode == LBS802_11POWERMODEMAX_PSP) { priv->psmode = LBS802_11POWERMODECAM; - lbs_set_ps_mode(priv, PS_MODE_ACTION_EXIT_PS, true); + /* no need to wake up if already woken up; + * on suspend, this exit-PS command is not processed + * and the driver hangs + */ + if (priv->psstate != PS_STATE_FULL_POWER) + lbs_set_ps_mode(priv, PS_MODE_ACTION_EXIT_PS, true); } if (priv->is_deep_sleep) { diff --git a/drivers/net/wireless/marvell/mwifiex/README b/drivers/net/wireless/marvell/mwifiex/README index 2f0f9b5609d0..24e649b1eb24 100644 --- a/drivers/net/wireless/marvell/mwifiex/README +++ b/drivers/net/wireless/marvell/mwifiex/README @@ -237,4 +237,14 @@ device_dump cat fw_dump +verext + This command is used to get the extended firmware version string using + different configuration parameters.
+ + Usage: + echo "[version_str_sel]" > verext + cat verext + + [version_str_sel]: the firmware supports several extended version + string cases, including 0/1/10/20/21/99 =============================================================================== diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c index e7adef72c05f..f2dce81ba36e 100644 --- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c +++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c @@ -1962,6 +1962,9 @@ mwifiex_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *dev, { struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); + if (!mwifiex_stop_bg_scan(priv)) + cfg80211_sched_scan_stopped_rtnl(priv->wdev.wiphy); + if (mwifiex_deauthenticate(priv, NULL)) return -EFAULT; @@ -2217,6 +2220,9 @@ mwifiex_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev, "info: Trying to associate to %s and bssid %pM\n", (char *)sme->ssid, sme->bssid); + if (!mwifiex_stop_bg_scan(priv)) + cfg80211_sched_scan_stopped_rtnl(priv->wdev.wiphy); + ret = mwifiex_cfg80211_assoc(priv, sme->ssid_len, sme->ssid, sme->bssid, priv->bss_mode, sme->channel, sme, 0); if (!ret) { @@ -2420,6 +2426,9 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy, return -EBUSY; } + if (!mwifiex_stop_bg_scan(priv)) + cfg80211_sched_scan_stopped_rtnl(priv->wdev.wiphy); + user_scan_cfg = kzalloc(sizeof(*user_scan_cfg), GFP_KERNEL); if (!user_scan_cfg) return -ENOMEM; @@ -2487,6 +2496,125 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy, return 0; } +/* CFG802.11 operation handler for sched_scan_start. + * + * This function issues a bgscan config request to the firmware based upon + * the user-specified sched_scan configuration. On successful completion, + * the firmware will generate a BGSCAN_REPORT event; the driver should then + * issue a bgscan query command to get the sched_scan results from the firmware.
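The copy loop in the handler below walks request->ie as a flat run of 802.11 information elements, each a tiny TLV: a one-byte element ID, a one-byte length, then the payload, advancing by sizeof(header) + len each step. A minimal, self-contained sketch of that walk (the two-byte header mirrors mwifiex's struct ieee_types_header; the callback name is hypothetical):

    #include <stddef.h>
    #include <stdint.h>

    struct ie_hdr {			/* mirrors struct ieee_types_header */
    	uint8_t element_id;
    	uint8_t len;
    } __attribute__((packed));

    /* Walk a flat buffer of 802.11 IEs, invoking cb for each element. */
    static void for_each_ie(const uint8_t *buf, size_t buf_len,
    			void (*cb)(const struct ie_hdr *ie))
    {
    	size_t offset = 0;

    	while (offset + sizeof(struct ie_hdr) <= buf_len) {
    		const struct ie_hdr *ie = (const void *)(buf + offset);

    		if (offset + sizeof(*ie) + ie->len > buf_len)
    			break;		/* truncated element: stop */
    		cb(ie);
    		offset += sizeof(*ie) + ie->len;
    	}
    }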
+/* CFG802.11 operation handler for sched_scan_start. + * + * This function issues a bgscan config request to the firmware based upon + * the user-specified sched_scan configuration. On successful completion, the + * firmware will generate a BGSCAN_REPORT event; the driver should then issue + * a bgscan query command to retrieve the sched_scan results from the firmware. + */ +static int +mwifiex_cfg80211_sched_scan_start(struct wiphy *wiphy, + struct net_device *dev, + struct cfg80211_sched_scan_request *request) +{ + struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); + int i, offset; + struct ieee80211_channel *chan; + struct mwifiex_bg_scan_cfg *bgscan_cfg; + struct ieee_types_header *ie; + + if (!request || (!request->n_ssids && !request->n_match_sets)) { + wiphy_err(wiphy, "%s: invalid sched_scan parameters", + __func__); + return -EINVAL; + } + + wiphy_info(wiphy, "sched_scan start: n_ssids=%d n_match_sets=%d ", + request->n_ssids, request->n_match_sets); + wiphy_info(wiphy, "n_channels=%d interval=%d ie_len=%d\n", + request->n_channels, request->scan_plans->interval, + (int)request->ie_len); + + bgscan_cfg = kzalloc(sizeof(*bgscan_cfg), GFP_KERNEL); + if (!bgscan_cfg) + return -ENOMEM; + + if (priv->scan_request || priv->scan_aborting) + bgscan_cfg->start_later = true; + + bgscan_cfg->num_ssids = request->n_match_sets; + bgscan_cfg->ssid_list = request->match_sets; + + if (request->ie && request->ie_len) { + offset = 0; + for (i = 0; i < MWIFIEX_MAX_VSIE_NUM; i++) { + if (priv->vs_ie[i].mask != MWIFIEX_VSIE_MASK_CLEAR) + continue; + priv->vs_ie[i].mask = MWIFIEX_VSIE_MASK_BGSCAN; + ie = (struct ieee_types_header *)(request->ie + offset); + memcpy(&priv->vs_ie[i].ie, ie, sizeof(*ie) + ie->len); + offset += sizeof(*ie) + ie->len; + + if (offset >= request->ie_len) + break; + } + } + + for (i = 0; i < min_t(u32, request->n_channels, + MWIFIEX_BG_SCAN_CHAN_MAX); i++) { + chan = request->channels[i]; + bgscan_cfg->chan_list[i].chan_number = chan->hw_value; + bgscan_cfg->chan_list[i].radio_type = chan->band; + + if ((chan->flags & IEEE80211_CHAN_NO_IR) || !request->n_ssids) + bgscan_cfg->chan_list[i].scan_type = + MWIFIEX_SCAN_TYPE_PASSIVE; + else + bgscan_cfg->chan_list[i].scan_type = + MWIFIEX_SCAN_TYPE_ACTIVE; + + bgscan_cfg->chan_list[i].scan_time = 0; + } + + bgscan_cfg->chan_per_scan = min_t(u32, request->n_channels, + MWIFIEX_BG_SCAN_CHAN_MAX); + + /* Use an interval of at least 15 seconds per scan cycle */ + bgscan_cfg->scan_interval = (request->scan_plans->interval > + MWIFIEX_BGSCAN_INTERVAL) ? + request->scan_plans->interval : + MWIFIEX_BGSCAN_INTERVAL; + + bgscan_cfg->repeat_count = MWIFIEX_BGSCAN_REPEAT_COUNT; + bgscan_cfg->report_condition = MWIFIEX_BGSCAN_SSID_MATCH | + MWIFIEX_BGSCAN_WAIT_ALL_CHAN_DONE; + bgscan_cfg->bss_type = MWIFIEX_BSS_MODE_INFRA; + bgscan_cfg->action = MWIFIEX_BGSCAN_ACT_SET; + bgscan_cfg->enable = true; + if (request->min_rssi_thold != NL80211_SCAN_RSSI_THOLD_OFF) { + bgscan_cfg->report_condition |= MWIFIEX_BGSCAN_SSID_RSSI_MATCH; + bgscan_cfg->rssi_threshold = request->min_rssi_thold; + } + + if (mwifiex_send_cmd(priv, HostCmd_CMD_802_11_BG_SCAN_CONFIG, + HostCmd_ACT_GEN_SET, 0, bgscan_cfg, true)) { + kfree(bgscan_cfg); + return -EFAULT; + } + + priv->sched_scanning = true; + + kfree(bgscan_cfg); + return 0; +}
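The kerneldoc above describes a three-step flow: config request, BGSCAN_REPORT event, then a bgscan query. The event side is not part of this hunk; below is a hedged sketch of the dispatch it implies, assuming EVENT_BG_SCAN_REPORT is the pre-existing report event and reusing this patch's command and event IDs (example_handle_bg_scan_event() is illustrative, not the driver's actual event handler):

static int example_handle_bg_scan_event(struct mwifiex_private *priv,
					u32 event_cause)
{
	switch (event_cause) {
	case EVENT_BG_SCAN_REPORT:
		/* Firmware found a match: fetch the cached results with a
		 * bgscan query; the response is parsed like a normal scan.
		 */
		return mwifiex_send_cmd(priv,
					HostCmd_CMD_802_11_BG_SCAN_QUERY,
					HostCmd_ACT_GEN_GET, 0, NULL, false);
	case EVENT_BG_SCAN_STOPPED:
		/* Firmware stopped on its own: keep cfg80211 in sync. */
		cfg80211_sched_scan_stopped(priv->wdev.wiphy);
		priv->sched_scanning = false;
		return 0;
	default:
		return 0;
	}
}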
+ +/* CFG802.11 operation handler for sched_scan_stop. + * + * This function issues a bgscan config command to disable the + * previous bgscan configuration in the firmware. + */ +static int mwifiex_cfg80211_sched_scan_stop(struct wiphy *wiphy, + struct net_device *dev) +{ + struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); + + wiphy_info(wiphy, "sched scan stop!"); + mwifiex_stop_bg_scan(priv); + + return 0; +} + static void mwifiex_setup_vht_caps(struct ieee80211_sta_vht_cap *vht_info, struct mwifiex_private *priv) @@ -2848,6 +2976,9 @@ int mwifiex_del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev) mwifiex_dev_debugfs_remove(priv); #endif + if (priv->sched_scanning) + priv->sched_scanning = false; + mwifiex_stop_net_dev_queue(priv->netdev, adapter); skb_queue_walk_safe(&priv->bypass_txq, skb, tmp) @@ -3044,10 +3175,12 @@ static int mwifiex_set_wowlan_mef_entry(struct mwifiex_private *priv, sizeof(byte_seq)); mef_entry->filter[filt_num].filt_type = TYPE_EQ; - if (first_pat) + if (first_pat) { first_pat = false; - else + mwifiex_dbg(priv->adapter, INFO, "Wake on patterns\n"); + } else { mef_entry->filter[filt_num].filt_action = TYPE_AND; + } filt_num++; } @@ -3073,6 +3206,7 @@ static int mwifiex_set_wowlan_mef_entry(struct mwifiex_private *priv, mef_entry->filter[filt_num].offset = 56; mef_entry->filter[filt_num].filt_type = TYPE_EQ; mef_entry->filter[filt_num].filt_action = TYPE_OR; + mwifiex_dbg(priv->adapter, INFO, "Wake on magic packet\n"); } return ret; } @@ -3143,7 +3277,7 @@ static int mwifiex_cfg80211_suspend(struct wiphy *wiphy, priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA); - if (!priv->media_connected) { + if (!priv->media_connected && !wowlan->nd_config) { mwifiex_dbg(adapter, ERROR, "Can not configure WOWLAN in disconnected state\n"); return 0; @@ -3155,19 +3289,30 @@ static int mwifiex_cfg80211_suspend(struct wiphy *wiphy, return ret; } + memset(&hs_cfg, 0, sizeof(hs_cfg)); + hs_cfg.conditions = le32_to_cpu(adapter->hs_cfg.conditions); + + if (wowlan->nd_config) { + mwifiex_dbg(adapter, INFO, "Wake on net detect\n"); + hs_cfg.conditions |= HS_CFG_COND_MAC_EVENT; + mwifiex_cfg80211_sched_scan_start(wiphy, priv->netdev, + wowlan->nd_config); + } + if (wowlan->disconnect) { - memset(&hs_cfg, 0, sizeof(hs_cfg)); - hs_cfg.is_invoke_hostcmd = false; - hs_cfg.conditions = HS_CFG_COND_MAC_EVENT; - hs_cfg.gpio = adapter->hs_cfg.gpio; - hs_cfg.gap = adapter->hs_cfg.gap; - ret = mwifiex_set_hs_params(priv, HostCmd_ACT_GEN_SET, - MWIFIEX_SYNC_CMD, &hs_cfg); - if (ret) { - mwifiex_dbg(adapter, ERROR, - "Failed to set HS params\n"); - return ret; - } + hs_cfg.conditions |= HS_CFG_COND_MAC_EVENT; + mwifiex_dbg(priv->adapter, INFO, "Wake on device disconnect\n"); + } + + hs_cfg.is_invoke_hostcmd = false; + hs_cfg.gpio = adapter->hs_cfg.gpio; + hs_cfg.gap = adapter->hs_cfg.gap; + ret = mwifiex_set_hs_params(priv, HostCmd_ACT_GEN_SET, + MWIFIEX_SYNC_CMD, &hs_cfg); + if (ret) { + mwifiex_dbg(adapter, ERROR, + "Failed to set HS params\n"); + return ret; } return ret; @@ -3175,6 +3320,64 @@ static int mwifiex_cfg80211_resume(struct wiphy *wiphy) { + struct mwifiex_adapter *adapter = mwifiex_cfg80211_get_adapter(wiphy); + struct mwifiex_private *priv = + mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA); + struct mwifiex_ds_wakeup_reason wakeup_reason; + struct cfg80211_wowlan_wakeup wakeup_report; + int i; + + mwifiex_get_wakeup_reason(priv, HostCmd_ACT_GEN_GET, MWIFIEX_SYNC_CMD, + &wakeup_reason); + memset(&wakeup_report, 0, sizeof(struct cfg80211_wowlan_wakeup));
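mwifiex_get_wakeup_reason(), called just above, is only declared later in this series (see the main.h hunk below); the matching response handler, mwifiex_ret_wakeup_reason(), appears in the cmdevt.c hunk. Under that assumption, a minimal plausible implementation is a thin wrapper around the new HS_WAKEUP_REASON host command (a sketch, not the driver's verbatim code):

int example_get_wakeup_reason(struct mwifiex_private *priv, u16 action,
			      int cmd_type,
			      struct mwifiex_ds_wakeup_reason *wakeup_reason)
{
	/* Query the firmware; the command response path copies
	 * resp->params.hs_wakeup_reason into *wakeup_reason.
	 */
	return mwifiex_send_cmd(priv, HostCmd_CMD_HS_WAKEUP_REASON, action,
				0, wakeup_reason,
				cmd_type == MWIFIEX_SYNC_CMD);
}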
+ + wakeup_report.pattern_idx = -1; + + switch (wakeup_reason.hs_wakeup_reason) { + case NO_HSWAKEUP_REASON: + break; + case BCAST_DATA_MATCHED: + break; + case MCAST_DATA_MATCHED: + break; + case UCAST_DATA_MATCHED: + break; + case MASKTABLE_EVENT_MATCHED: + break; + case NON_MASKABLE_EVENT_MATCHED: + if (wiphy->wowlan_config->disconnect) + wakeup_report.disconnect = true; + if (wiphy->wowlan_config->nd_config) + wakeup_report.net_detect = adapter->nd_info; + break; + case NON_MASKABLE_CONDITION_MATCHED: + break; + case MAGIC_PATTERN_MATCHED: + if (wiphy->wowlan_config->magic_pkt) + wakeup_report.magic_pkt = true; + if (wiphy->wowlan_config->n_patterns) + wakeup_report.pattern_idx = 1; + break; + case CONTROL_FRAME_MATCHED: + break; + case MANAGEMENT_FRAME_MATCHED: + break; + default: + break; + } + + if ((wakeup_reason.hs_wakeup_reason > 0) && + (wakeup_reason.hs_wakeup_reason <= 7)) + cfg80211_report_wowlan_wakeup(&priv->wdev, &wakeup_report, + GFP_KERNEL); + + if (adapter->nd_info) { + for (i = 0 ; i < adapter->nd_info->n_matches ; i++) + kfree(adapter->nd_info->matches[i]); + kfree(adapter->nd_info); + adapter->nd_info = NULL; + } + return 0; } @@ -3590,8 +3793,8 @@ static int mwifiex_cfg80211_get_channel(struct wiphy *wiphy, freq = ieee80211_channel_to_frequency(curr_bss->channel, band); chan = ieee80211_get_channel(wiphy, freq); - if (curr_bss->bcn_ht_oper) { - second_chan_offset = curr_bss->bcn_ht_oper->ht_param & + if (priv->ht_param_present) { + second_chan_offset = priv->assoc_resp_ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET; chan_type = mwifiex_sec_chan_offset_to_chan_type (second_chan_offset); @@ -3701,6 +3904,8 @@ static struct cfg80211_ops mwifiex_cfg80211_ops = { .set_cqm_rssi_config = mwifiex_cfg80211_set_cqm_rssi_config, .set_antenna = mwifiex_cfg80211_set_antenna, .del_station = mwifiex_cfg80211_del_station, + .sched_scan_start = mwifiex_cfg80211_sched_scan_start, + .sched_scan_stop = mwifiex_cfg80211_sched_scan_stop, #ifdef CONFIG_PM .suspend = mwifiex_cfg80211_suspend, .resume = mwifiex_cfg80211_resume, @@ -3720,11 +3925,13 @@ static struct cfg80211_ops mwifiex_cfg80211_ops = { #ifdef CONFIG_PM static const struct wiphy_wowlan_support mwifiex_wowlan_support = { - .flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT, + .flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT | + WIPHY_WOWLAN_NET_DETECT, .n_patterns = MWIFIEX_MEF_MAX_FILTERS, .pattern_min_len = 1, .pattern_max_len = MWIFIEX_MAX_PATTERN_LEN, .max_pkt_offset = MWIFIEX_MAX_OFFSET_LEN, + .max_nd_match_sets = MWIFIEX_MAX_ND_MATCH_SETS, }; #endif @@ -3829,6 +4036,7 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter) wiphy->flags |= WIPHY_FLAG_HAVE_AP_SME | WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD | WIPHY_FLAG_AP_UAPSD | + WIPHY_FLAG_SUPPORTS_SCHED_SCAN | WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL | WIPHY_FLAG_HAS_CHANNEL_SWITCH | WIPHY_FLAG_PS_ON_BY_DEFAULT; @@ -3847,6 +4055,10 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter) NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 | NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P; + wiphy->max_sched_scan_ssids = MWIFIEX_MAX_SSID_LIST_LENGTH; + wiphy->max_sched_scan_ie_len = MWIFIEX_MAX_VSIE_LEN; + wiphy->max_match_sets = MWIFIEX_MAX_SSID_LIST_LENGTH; + wiphy->available_antennas_tx = BIT(adapter->number_of_antenna) - 1; wiphy->available_antennas_rx = BIT(adapter->number_of_antenna) - 1; diff --git a/drivers/net/wireless/marvell/mwifiex/cmdevt.c b/drivers/net/wireless/marvell/mwifiex/cmdevt.c index cb25aa7e90db..a12adee776c6 100644 --- 
a/drivers/net/wireless/marvell/mwifiex/cmdevt.c +++ b/drivers/net/wireless/marvell/mwifiex/cmdevt.c @@ -1657,3 +1657,16 @@ int mwifiex_ret_get_hw_spec(struct mwifiex_private *priv, return 0; } + +/* This function handles the command response of hs wakeup reason + * command. + */ +int mwifiex_ret_wakeup_reason(struct mwifiex_private *priv, + struct host_cmd_ds_command *resp, + struct host_cmd_ds_wakeup_reason *wakeup_reason) +{ + wakeup_reason->wakeup_reason = + resp->params.hs_wakeup_reason.wakeup_reason; + + return 0; +} diff --git a/drivers/net/wireless/marvell/mwifiex/debugfs.c b/drivers/net/wireless/marvell/mwifiex/debugfs.c index 5e5562909d9f..df28836a1d11 100644 --- a/drivers/net/wireless/marvell/mwifiex/debugfs.c +++ b/drivers/net/wireless/marvell/mwifiex/debugfs.c @@ -95,8 +95,7 @@ mwifiex_info_read(struct file *file, char __user *ubuf, mwifiex_drv_get_driver_version(priv->adapter, fmt, sizeof(fmt) - 1); - if (!priv->version_str[0]) - mwifiex_get_ver_ext(priv); + mwifiex_get_ver_ext(priv, 0); p += sprintf(p, "driver_name = " "\"mwifiex\"\n"); p += sprintf(p, "driver_version = %s", fmt); @@ -447,20 +446,13 @@ static ssize_t mwifiex_regrdwr_write(struct file *file, const char __user *ubuf, size_t count, loff_t *ppos) { - unsigned long addr = get_zeroed_page(GFP_KERNEL); - char *buf = (char *) addr; - size_t buf_size = min_t(size_t, count, PAGE_SIZE - 1); + char *buf; int ret; u32 reg_type = 0, reg_offset = 0, reg_value = UINT_MAX; - if (!buf) - return -ENOMEM; - - - if (copy_from_user(buf, ubuf, buf_size)) { - ret = -EFAULT; - goto done; - } + buf = memdup_user_nul(ubuf, min(count, (size_t)(PAGE_SIZE - 1))); + if (IS_ERR(buf)) + return PTR_ERR(buf); sscanf(buf, "%u %x %x", &reg_type, &reg_offset, &reg_value); @@ -474,7 +466,7 @@ mwifiex_regrdwr_write(struct file *file, ret = count; } done: - free_page(addr); + kfree(buf); return ret; } @@ -572,17 +564,11 @@ mwifiex_debug_mask_write(struct file *file, const char __user *ubuf, int ret; unsigned long debug_mask; struct mwifiex_private *priv = (void *)file->private_data; - unsigned long addr = get_zeroed_page(GFP_KERNEL); - char *buf = (void *)addr; - size_t buf_size = min(count, (size_t)(PAGE_SIZE - 1)); - - if (!buf) - return -ENOMEM; + char *buf; - if (copy_from_user(buf, ubuf, buf_size)) { - ret = -EFAULT; - goto done; - } + buf = memdup_user_nul(ubuf, min(count, (size_t)(PAGE_SIZE - 1))); + if (IS_ERR(buf)) + return PTR_ERR(buf); if (kstrtoul(buf, 0, &debug_mask)) { ret = -EINVAL; @@ -592,10 +578,56 @@ mwifiex_debug_mask_write(struct file *file, const char __user *ubuf, priv->adapter->debug_mask = debug_mask; ret = count; done: - free_page(addr); + kfree(buf); return ret; } +/* debugfs verext file write handler. + * This function is called when the 'verext' file is opened for writing + */ +static ssize_t +mwifiex_verext_write(struct file *file, const char __user *ubuf, + size_t count, loff_t *ppos) +{ + int ret; + u32 versionstrsel; + struct mwifiex_private *priv = (void *)file->private_data; + char buf[16]; + + memset(buf, 0, sizeof(buf)); + + if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count))) + return -EFAULT; + + ret = kstrtou32(buf, 10, &versionstrsel); + if (ret) + return ret; + + priv->versionstrsel = versionstrsel; + + return count; +} + +/* Proc verext file read handler. + * This function is called when the 'verext' file is opened for reading. + * This function can be used to read the driver's extended version string.
+ */ +static ssize_t +mwifiex_verext_read(struct file *file, char __user *ubuf, + size_t count, loff_t *ppos) +{ + struct mwifiex_private *priv = + (struct mwifiex_private *)file->private_data; + char buf[256]; + int ret; + + mwifiex_get_ver_ext(priv, priv->versionstrsel); + ret = snprintf(buf, sizeof(buf), "version string: %s\n", + priv->version_str); + + return simple_read_from_buffer(ubuf, count, ppos, buf, ret); +} + /* Proc memrw file write handler. * This function is called when the 'memrw' file is opened for writing * This function can be used to write to a memory location. @@ -609,17 +641,11 @@ mwifiex_memrw_write(struct file *file, const char __user *ubuf, size_t count, struct mwifiex_ds_mem_rw mem_rw; u16 cmd_action; struct mwifiex_private *priv = (void *)file->private_data; - unsigned long addr = get_zeroed_page(GFP_KERNEL); - char *buf = (void *)addr; - size_t buf_size = min(count, (size_t)(PAGE_SIZE - 1)); - - if (!buf) - return -ENOMEM; + char *buf; - if (copy_from_user(buf, ubuf, buf_size)) { - ret = -EFAULT; - goto done; - } + buf = memdup_user_nul(ubuf, min(count, (size_t)(PAGE_SIZE - 1))); + if (IS_ERR(buf)) + return PTR_ERR(buf); ret = sscanf(buf, "%c %x %x", &cmd, &mem_rw.addr, &mem_rw.value); if (ret != 3) { @@ -645,7 +671,7 @@ mwifiex_memrw_write(struct file *file, const char __user *ubuf, size_t count, ret = count; done: - free_page(addr); + kfree(buf); return ret; } @@ -686,20 +712,13 @@ static ssize_t mwifiex_rdeeprom_write(struct file *file, const char __user *ubuf, size_t count, loff_t *ppos) { - unsigned long addr = get_zeroed_page(GFP_KERNEL); - char *buf = (char *) addr; - size_t buf_size = min_t(size_t, count, PAGE_SIZE - 1); + char *buf; int ret = 0; int offset = -1, bytes = -1; - if (!buf) - return -ENOMEM; - - - if (copy_from_user(buf, ubuf, buf_size)) { - ret = -EFAULT; - goto done; - } + buf = memdup_user_nul(ubuf, min(count, (size_t)(PAGE_SIZE - 1))); + if (IS_ERR(buf)) + return PTR_ERR(buf); sscanf(buf, "%d %d", &offset, &bytes); @@ -712,7 +731,7 @@ mwifiex_rdeeprom_write(struct file *file, ret = count; } done: - free_page(addr); + kfree(buf); return ret; } @@ -771,21 +790,15 @@ mwifiex_hscfg_write(struct file *file, const char __user *ubuf, size_t count, loff_t *ppos) { struct mwifiex_private *priv = (void *)file->private_data; - unsigned long addr = get_zeroed_page(GFP_KERNEL); - char *buf = (char *)addr; - size_t buf_size = min_t(size_t, count, PAGE_SIZE - 1); + char *buf; int ret, arg_num; struct mwifiex_ds_hs_cfg hscfg; int conditions = HS_CFG_COND_DEF; u32 gpio = HS_CFG_GPIO_DEF, gap = HS_CFG_GAP_DEF; - if (!buf) - return -ENOMEM; - - if (copy_from_user(buf, ubuf, buf_size)) { - ret = -EFAULT; - goto done; - } + buf = memdup_user_nul(ubuf, min(count, (size_t)(PAGE_SIZE - 1))); + if (IS_ERR(buf)) + return PTR_ERR(buf); arg_num = sscanf(buf, "%d %x %x", &conditions, &gpio, &gap); @@ -823,7 +836,7 @@ mwifiex_hscfg_write(struct file *file, const char __user *ubuf, priv->adapter->hs_enabling = false; ret = count; done: - free_page(addr); + kfree(buf); return ret; } @@ -972,6 +985,7 @@ MWIFIEX_DFS_FILE_OPS(histogram); MWIFIEX_DFS_FILE_OPS(debug_mask); MWIFIEX_DFS_FILE_OPS(timeshare_coex); MWIFIEX_DFS_FILE_WRITE_OPS(reset); +MWIFIEX_DFS_FILE_OPS(verext); /* * This function creates the debug FS directory structure and the files. 
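Every debugfs write handler above drops the open-coded get_zeroed_page()/copy_from_user() sequence in favour of memdup_user_nul(), which allocates, copies, and NUL-terminates in one call and returns an ERR_PTR (never NULL) on failure. A self-contained reference sketch of the resulting pattern; example_write() and example_val are illustrative only:

#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>

static unsigned long example_val;

static ssize_t example_write(struct file *file, const char __user *ubuf,
			     size_t count, loff_t *ppos)
{
	char *buf;
	int ret;

	/* Bounded copy: at most one page, minus room for the NUL. */
	buf = memdup_user_nul(ubuf, min(count, (size_t)(PAGE_SIZE - 1)));
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	ret = kstrtoul(buf, 0, &example_val);
	kfree(buf);

	return ret ? ret : count;
}

Note the changed failure semantics: the old code returned -ENOMEM or -EFAULT by hand, while PTR_ERR() now propagates whichever error memdup_user_nul() hit.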
@@ -1000,6 +1014,7 @@ mwifiex_dev_debugfs_init(struct mwifiex_private *priv) MWIFIEX_DFS_ADD_FILE(debug_mask); MWIFIEX_DFS_ADD_FILE(timeshare_coex); MWIFIEX_DFS_ADD_FILE(reset); + MWIFIEX_DFS_ADD_FILE(verext); } /* diff --git a/drivers/net/wireless/marvell/mwifiex/decl.h b/drivers/net/wireless/marvell/mwifiex/decl.h index d9c15cd36f12..bec300b9c2ea 100644 --- a/drivers/net/wireless/marvell/mwifiex/decl.h +++ b/drivers/net/wireless/marvell/mwifiex/decl.h @@ -122,6 +122,8 @@ #define BLOCK_NUMBER_OFFSET 15 #define SDIO_HEADER_OFFSET 28 +#define MWIFIEX_SIZE_4K 0x4000 + enum mwifiex_bss_type { MWIFIEX_BSS_TYPE_STA = 0, MWIFIEX_BSS_TYPE_UAP = 1, @@ -270,4 +272,26 @@ struct mwifiex_11h_intf_state { bool is_11h_enabled; bool is_11h_active; } __packed; + +#define MWIFIEX_FW_DUMP_IDX 0xff +#define MWIFIEX_FW_DUMP_MAX_MEMSIZE 0x160000 +#define MWIFIEX_DRV_INFO_IDX 20 +#define FW_DUMP_MAX_NAME_LEN 8 +#define FW_DUMP_HOST_READY 0xEE +#define FW_DUMP_DONE 0xFF +#define FW_DUMP_READ_DONE 0xFE + +struct memory_type_mapping { + u8 mem_name[FW_DUMP_MAX_NAME_LEN]; + u8 *mem_ptr; + u32 mem_size; + u8 done_flag; +}; + +enum rdwr_status { + RDWR_STATUS_SUCCESS = 0, + RDWR_STATUS_FAILURE = 1, + RDWR_STATUS_DONE = 2 +}; + #endif /* !_MWIFIEX_DECL_H_ */ diff --git a/drivers/net/wireless/marvell/mwifiex/fw.h b/drivers/net/wireless/marvell/mwifiex/fw.h index ced7af2be29a..c134cf865291 100644 --- a/drivers/net/wireless/marvell/mwifiex/fw.h +++ b/drivers/net/wireless/marvell/mwifiex/fw.h @@ -96,7 +96,7 @@ enum KEY_TYPE_ID { #define WAPI_KEY_LEN (WLAN_KEY_LEN_SMS4 + PN_LEN + 2) #define MAX_POLL_TRIES 100 -#define MAX_FIRMWARE_POLL_TRIES 100 +#define MAX_FIRMWARE_POLL_TRIES 150 #define FIRMWARE_READY_SDIO 0xfedc #define FIRMWARE_READY_PCIE 0xfedcba00 @@ -144,6 +144,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER { #define TLV_TYPE_WILDCARDSSID (PROPRIETARY_TLV_BASE_ID + 18) #define TLV_TYPE_TSFTIMESTAMP (PROPRIETARY_TLV_BASE_ID + 19) #define TLV_TYPE_RSSI_HIGH (PROPRIETARY_TLV_BASE_ID + 22) +#define TLV_TYPE_BGSCAN_START_LATER (PROPRIETARY_TLV_BASE_ID + 30) #define TLV_TYPE_AUTH_TYPE (PROPRIETARY_TLV_BASE_ID + 31) #define TLV_TYPE_STA_MAC_ADDR (PROPRIETARY_TLV_BASE_ID + 32) #define TLV_TYPE_BSSID (PROPRIETARY_TLV_BASE_ID + 35) @@ -177,6 +178,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER { #define TLV_TYPE_TX_PAUSE (PROPRIETARY_TLV_BASE_ID + 148) #define TLV_TYPE_COALESCE_RULE (PROPRIETARY_TLV_BASE_ID + 154) #define TLV_TYPE_KEY_PARAM_V2 (PROPRIETARY_TLV_BASE_ID + 156) +#define TLV_TYPE_REPEAT_COUNT (PROPRIETARY_TLV_BASE_ID + 176) #define TLV_TYPE_MULTI_CHAN_INFO (PROPRIETARY_TLV_BASE_ID + 183) #define TLV_TYPE_MC_GROUP_INFO (PROPRIETARY_TLV_BASE_ID + 184) #define TLV_TYPE_TDLS_IDLE_TIMEOUT (PROPRIETARY_TLV_BASE_ID + 194) @@ -331,6 +333,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER { #define HostCmd_CMD_802_11_MAC_ADDRESS 0x004D #define HostCmd_CMD_802_11D_DOMAIN_INFO 0x005b #define HostCmd_CMD_802_11_KEY_MATERIAL 0x005e +#define HostCmd_CMD_802_11_BG_SCAN_CONFIG 0x006b #define HostCmd_CMD_802_11_BG_SCAN_QUERY 0x006c #define HostCmd_CMD_WMM_GET_STATUS 0x0071 #define HostCmd_CMD_802_11_SUBSCRIBE_EVENT 0x0075 @@ -370,6 +373,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER { #define HostCmd_CMD_MGMT_FRAME_REG 0x010c #define HostCmd_CMD_REMAIN_ON_CHAN 0x010d #define HostCmd_CMD_11AC_CFG 0x0112 +#define HostCmd_CMD_HS_WAKEUP_REASON 0x0116 #define HostCmd_CMD_TDLS_CONFIG 0x0100 #define HostCmd_CMD_MC_POLICY 0x0121 #define HostCmd_CMD_TDLS_OPER 0x0122 @@ -523,6 +527,7 @@ enum P2P_MODES { #define EVENT_CHANNEL_REPORT_RDY 0x00000054 #define 
EVENT_TX_DATA_PAUSE 0x00000055 #define EVENT_EXT_SCAN_REPORT 0x00000058 +#define EVENT_BG_SCAN_STOPPED 0x00000065 #define EVENT_REMAIN_ON_CHAN_EXPIRED 0x0000005f #define EVENT_MULTI_CHAN_INFO 0x0000006a #define EVENT_TX_STATUS_REPORT 0x00000074 @@ -539,6 +544,8 @@ enum P2P_MODES { #define MWIFIEX_MAX_PATTERN_LEN 40 #define MWIFIEX_MAX_OFFSET_LEN 100 +#define MWIFIEX_MAX_ND_MATCH_SETS 10 + #define STACK_NBYTES 100 #define TYPE_DNUM 1 #define TYPE_BYTESEQ 2 @@ -601,6 +608,20 @@ struct mwifiex_ie_types_data { #define MWIFIEX_RXPD_FLAGS_TDLS_PACKET 0x01 #define MWIFIEX_TXPD_FLAGS_REQ_TX_STATUS 0x20 +enum HS_WAKEUP_REASON { + NO_HSWAKEUP_REASON = 0, + BCAST_DATA_MATCHED, + MCAST_DATA_MATCHED, + UCAST_DATA_MATCHED, + MASKTABLE_EVENT_MATCHED, + NON_MASKABLE_EVENT_MATCHED, + NON_MASKABLE_CONDITION_MATCHED, + MAGIC_PATTERN_MATCHED, + CONTROL_FRAME_MATCHED, + MANAGEMENT_FRAME_MATCHED, + RESERVED +}; + struct txpd { u8 bss_type; u8 bss_num; @@ -733,6 +754,21 @@ struct mwifiex_ie_types_num_probes { __le16 num_probes; } __packed; +struct mwifiex_ie_types_repeat_count { + struct mwifiex_ie_types_header header; + __le16 repeat_count; +} __packed; + +struct mwifiex_ie_types_min_rssi_threshold { + struct mwifiex_ie_types_header header; + __le16 rssi_threshold; +} __packed; + +struct mwifiex_ie_types_bgscan_start_later { + struct mwifiex_ie_types_header header; + __le16 start_later; +} __packed; + struct mwifiex_ie_types_scan_chan_gap { struct mwifiex_ie_types_header header; /* time gap in TUs to be used between two consecutive channels scan */ @@ -1027,7 +1063,7 @@ struct ieee_types_assoc_rsp { __le16 cap_info_bitmap; __le16 status_code; __le16 a_id; - u8 ie_buffer[1]; + u8 ie_buffer[0]; } __packed; struct host_cmd_ds_802_11_associate_rsp { @@ -1425,6 +1461,36 @@ struct mwifiex_user_scan_cfg { u16 scan_chan_gap; } __packed; +#define MWIFIEX_BG_SCAN_CHAN_MAX 38 +#define MWIFIEX_BSS_MODE_INFRA 1 +#define MWIFIEX_BGSCAN_ACT_GET 0x0000 +#define MWIFIEX_BGSCAN_ACT_SET 0x0001 +#define MWIFIEX_BGSCAN_ACT_SET_ALL 0xff01 +/** ssid match */ +#define MWIFIEX_BGSCAN_SSID_MATCH 0x0001 +/** ssid match and RSSI exceeded */ +#define MWIFIEX_BGSCAN_SSID_RSSI_MATCH 0x0004 +/**wait for all channel scan to complete to report scan result*/ +#define MWIFIEX_BGSCAN_WAIT_ALL_CHAN_DONE 0x80000000 + +struct mwifiex_bg_scan_cfg { + u16 action; + u8 enable; + u8 bss_type; + u8 chan_per_scan; + u32 scan_interval; + u32 report_condition; + u8 num_probes; + u8 rssi_threshold; + u8 snr_threshold; + u16 repeat_count; + u16 start_later; + struct cfg80211_match_set *ssid_list; + u8 num_ssids; + struct mwifiex_user_scan_chan chan_list[MWIFIEX_BG_SCAN_CHAN_MAX]; + u16 scan_chan_gap; +} __packed; + struct ie_body { u8 grp_key_oui[4]; u8 ptk_cnt[2]; @@ -1470,6 +1536,20 @@ struct mwifiex_ie_types_bss_scan_info { __le64 tsf; } __packed; +struct host_cmd_ds_802_11_bg_scan_config { + __le16 action; + u8 enable; + u8 bss_type; + u8 chan_per_scan; + u8 reserved; + __le16 reserved1; + __le32 scan_interval; + __le32 reserved2; + __le32 report_condition; + __le16 reserved3; + u8 tlv[0]; +} __packed; + struct host_cmd_ds_802_11_bg_scan_query { u8 flush; } __packed; @@ -2099,6 +2179,10 @@ struct host_cmd_ds_robust_coex { __le16 reserved; } __packed; +struct host_cmd_ds_wakeup_reason { + u16 wakeup_reason; +} __packed; + struct host_cmd_ds_command { __le16 command; __le16 size; @@ -2124,6 +2208,7 @@ struct host_cmd_ds_command { struct host_cmd_ds_802_11_scan scan; struct host_cmd_ds_802_11_scan_ext ext_scan; struct host_cmd_ds_802_11_scan_rsp scan_resp; + 
struct host_cmd_ds_802_11_bg_scan_config bg_scan_config; struct host_cmd_ds_802_11_bg_scan_query bg_scan_query; struct host_cmd_ds_802_11_bg_scan_query_rsp bg_scan_query_resp; struct host_cmd_ds_802_11_associate associate; @@ -2170,6 +2255,7 @@ struct host_cmd_ds_command { struct host_cmd_sdio_sp_rx_aggr_cfg sdio_rx_aggr_cfg; struct host_cmd_ds_multi_chan_policy mc_policy; struct host_cmd_ds_robust_coex coex; + struct host_cmd_ds_wakeup_reason hs_wakeup_reason; } params; } __packed; diff --git a/drivers/net/wireless/marvell/mwifiex/init.c b/drivers/net/wireless/marvell/mwifiex/init.c index 6f7876ec31b7..517653b3adab 100644 --- a/drivers/net/wireless/marvell/mwifiex/init.c +++ b/drivers/net/wireless/marvell/mwifiex/init.c @@ -741,8 +741,6 @@ int mwifiex_dnld_fw(struct mwifiex_adapter *adapter, u32 poll_num = 1; if (adapter->if_ops.check_fw_status) { - adapter->winner = 0; - /* check if firmware is already running */ ret = adapter->if_ops.check_fw_status(adapter, poll_num); if (!ret) { @@ -750,13 +748,23 @@ int mwifiex_dnld_fw(struct mwifiex_adapter *adapter, "WLAN FW already running! Skip FW dnld\n"); return 0; } + } + + /* check if we are the winner for downloading FW */ + if (adapter->if_ops.check_winner_status) { + adapter->winner = 0; + ret = adapter->if_ops.check_winner_status(adapter); poll_num = MAX_FIRMWARE_POLL_TRIES; + if (ret) { + mwifiex_dbg(adapter, MSG, + "WLAN read winner status failed!\n"); + return ret; + } - /* check if we are the winner for downloading FW */ if (!adapter->winner) { mwifiex_dbg(adapter, MSG, - "FW already running! Skip FW dnld\n"); + "WLAN is not the winner! Skip FW dnld\n"); goto poll_fw; } } diff --git a/drivers/net/wireless/marvell/mwifiex/ioctl.h b/drivers/net/wireless/marvell/mwifiex/ioctl.h index 4f0174c64946..14cfa37deb00 100644 --- a/drivers/net/wireless/marvell/mwifiex/ioctl.h +++ b/drivers/net/wireless/marvell/mwifiex/ioctl.h @@ -271,6 +271,10 @@ struct mwifiex_ds_hs_cfg { u32 gap; }; +struct mwifiex_ds_wakeup_reason { + u16 hs_wakeup_reason; +}; + #define DEEP_SLEEP_ON 1 #define DEEP_SLEEP_OFF 0 #define DEEP_SLEEP_IDLE_TIME 100 @@ -414,6 +418,7 @@ struct mwifiex_ds_mef_cfg { #define MWIFIEX_VSIE_MASK_SCAN 0x01 #define MWIFIEX_VSIE_MASK_ASSOC 0x02 #define MWIFIEX_VSIE_MASK_ADHOC 0x04 +#define MWIFIEX_VSIE_MASK_BGSCAN 0x08 enum { MWIFIEX_FUNC_INIT = 1, diff --git a/drivers/net/wireless/marvell/mwifiex/join.c b/drivers/net/wireless/marvell/mwifiex/join.c index cc09a81dbf6a..62211fca91b7 100644 --- a/drivers/net/wireless/marvell/mwifiex/join.c +++ b/drivers/net/wireless/marvell/mwifiex/join.c @@ -644,6 +644,8 @@ int mwifiex_ret_802_11_associate(struct mwifiex_private *priv, struct mwifiex_bssdescriptor *bss_desc; bool enable_data = true; u16 cap_info, status_code, aid; + const u8 *ie_ptr; + struct ieee80211_ht_operation *assoc_resp_ht_oper; assoc_rsp = (struct ieee_types_assoc_rsp *) &resp->params; @@ -733,6 +735,19 @@ int mwifiex_ret_802_11_associate(struct mwifiex_private *priv, = ((bss_desc->wmm_ie.qos_info_bitmap & IEEE80211_WMM_IE_AP_QOSINFO_UAPSD) ? 
1 : 0); + /* Store the bandwidth information from assoc response */ + ie_ptr = cfg80211_find_ie(WLAN_EID_HT_OPERATION, assoc_rsp->ie_buffer, + priv->assoc_rsp_size + - sizeof(struct ieee_types_assoc_rsp)); + if (ie_ptr) { + assoc_resp_ht_oper = (struct ieee80211_ht_operation *)(ie_ptr + + sizeof(struct ieee_types_header)); + priv->assoc_resp_ht_param = assoc_resp_ht_oper->ht_param; + priv->ht_param_present = true; + } else { + priv->ht_param_present = false; + } + mwifiex_dbg(priv->adapter, INFO, "info: ASSOC_RESP: curr_pkt_filter is %#x\n", priv->curr_pkt_filter); diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c index 79c16de8743e..3cfa94677a8e 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.c +++ b/drivers/net/wireless/marvell/mwifiex/main.c @@ -132,6 +132,13 @@ static int mwifiex_unregister(struct mwifiex_adapter *adapter) } } + if (adapter->nd_info) { + for (i = 0 ; i < adapter->nd_info->n_matches ; i++) + kfree(adapter->nd_info->matches[i]); + kfree(adapter->nd_info); + adapter->nd_info = NULL; + } + vfree(adapter->chan_stats); kfree(adapter); return 0; @@ -746,6 +753,13 @@ int mwifiex_queue_tx_pkt(struct mwifiex_private *priv, struct sk_buff *skb) mwifiex_queue_main_work(priv->adapter); + if (priv->sched_scanning) { + mwifiex_dbg(priv->adapter, INFO, + "aborting bgscan on ndo_stop\n"); + mwifiex_stop_bg_scan(priv); + cfg80211_sched_scan_stopped(priv->wdev.wiphy); + } + return 0; } diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h index 2f7f478ce04b..aea7aee46cf7 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.h +++ b/drivers/net/wireless/marvell/mwifiex/main.h @@ -198,6 +198,11 @@ do { \ buf, len, false); \ } while (0) +/** Min BGSCAN interval 15 second */ +#define MWIFIEX_BGSCAN_INTERVAL 15000 +/** default repeat count */ +#define MWIFIEX_BGSCAN_REPEAT_COUNT 6 + struct mwifiex_dbg { u32 num_cmd_host_to_card_failure; u32 num_cmd_sleep_cfm_host_to_card_failure; @@ -293,6 +298,7 @@ struct mwifiex_tid_tbl { #define WMM_HIGHEST_PRIORITY 7 #define HIGH_PRIO_TID 7 #define LOW_PRIO_TID 0 +#define MWIFIEX_WMM_DRV_DELAY_MAX 510 struct mwifiex_wmm_desc { struct mwifiex_tid_tbl tid_tbl_ptr[MAX_NUM_TID]; @@ -483,26 +489,6 @@ struct mwifiex_roc_cfg { struct ieee80211_channel chan; }; -#define MWIFIEX_FW_DUMP_IDX 0xff -#define MWIFIEX_DRV_INFO_IDX 20 -#define FW_DUMP_MAX_NAME_LEN 8 -#define FW_DUMP_HOST_READY 0xEE -#define FW_DUMP_DONE 0xFF -#define FW_DUMP_READ_DONE 0xFE - -struct memory_type_mapping { - u8 mem_name[FW_DUMP_MAX_NAME_LEN]; - u8 *mem_ptr; - u32 mem_size; - u8 done_flag; -}; - -enum rdwr_status { - RDWR_STATUS_SUCCESS = 0, - RDWR_STATUS_FAILURE = 1, - RDWR_STATUS_DONE = 2 -}; - enum mwifiex_iface_work_flags { MWIFIEX_IFACE_WORK_DEVICE_DUMP, MWIFIEX_IFACE_WORK_CARD_RESET, @@ -616,6 +602,7 @@ struct mwifiex_private { spinlock_t curr_bcn_buf_lock; struct wireless_dev wdev; struct mwifiex_chan_freq_power cfp; + u32 versionstrsel; char version_str[128]; #ifdef CONFIG_DEBUG_FS struct dentry *dfs_dev_dir; @@ -640,6 +627,7 @@ struct mwifiex_private { u32 mgmt_frame_mask; struct mwifiex_roc_cfg roc_cfg; bool scan_aborting; + u8 sched_scanning; u8 csa_chan; unsigned long csa_expire_time; u8 del_list_idx; @@ -667,6 +655,8 @@ struct mwifiex_private { struct mwifiex_ds_mem_rw mem_rw; struct sk_buff_head bypass_txq; struct mwifiex_user_scan_chan hidden_chan[MWIFIEX_USER_SCAN_CHAN_MAX]; + u8 assoc_resp_ht_param; + bool ht_param_present; }; @@ -791,6 +781,7 @@ struct 
mwifiex_if_ops { int (*init_if) (struct mwifiex_adapter *); void (*cleanup_if) (struct mwifiex_adapter *); int (*check_fw_status) (struct mwifiex_adapter *, u32); + int (*check_winner_status)(struct mwifiex_adapter *); int (*prog_fw) (struct mwifiex_adapter *, struct mwifiex_fw_image *); int (*register_dev) (struct mwifiex_adapter *); void (*unregister_dev) (struct mwifiex_adapter *); @@ -994,6 +985,7 @@ struct mwifiex_adapter { u8 active_scan_triggered; bool usb_mc_status; bool usb_mc_setup; + struct cfg80211_wowlan_nd_info *nd_info; }; void mwifiex_process_tx_queue(struct mwifiex_adapter *adapter); @@ -1196,6 +1188,10 @@ int mwifiex_ret_802_11_scan_ext(struct mwifiex_private *priv, struct host_cmd_ds_command *resp); int mwifiex_handle_event_ext_scan_report(struct mwifiex_private *priv, void *buf); +int mwifiex_cmd_802_11_bg_scan_config(struct mwifiex_private *priv, + struct host_cmd_ds_command *cmd, + void *data_buf); +int mwifiex_stop_bg_scan(struct mwifiex_private *priv); /* * This function checks if the queuing is RA based or not. @@ -1417,7 +1413,7 @@ int mwifiex_set_encode(struct mwifiex_private *priv, struct key_params *kp, int mwifiex_set_gen_ie(struct mwifiex_private *priv, const u8 *ie, int ie_len); -int mwifiex_get_ver_ext(struct mwifiex_private *priv); +int mwifiex_get_ver_ext(struct mwifiex_private *priv, u32 version_str_sel); int mwifiex_remain_on_chan_cfg(struct mwifiex_private *priv, u16 action, struct ieee80211_channel *chan, @@ -1586,6 +1582,12 @@ void mwifiex_drv_info_dump(struct mwifiex_adapter *adapter); void mwifiex_upload_device_dump(struct mwifiex_adapter *adapter); void *mwifiex_alloc_dma_align_buf(int rx_len, gfp_t flags); void mwifiex_queue_main_work(struct mwifiex_adapter *adapter); +int mwifiex_get_wakeup_reason(struct mwifiex_private *priv, u16 action, + int cmd_type, + struct mwifiex_ds_wakeup_reason *wakeup_reason); +int mwifiex_ret_wakeup_reason(struct mwifiex_private *priv, + struct host_cmd_ds_command *resp, + struct host_cmd_ds_wakeup_reason *wakeup_reason); void mwifiex_coex_ampdu_rxwinsize(struct mwifiex_adapter *adapter); void mwifiex_11n_delba(struct mwifiex_private *priv, int tid); int mwifiex_send_domain_info_cmd_fw(struct wiphy *wiphy); diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c index 6d0dc40e20e5..efb19e2e1169 100644 --- a/drivers/net/wireless/marvell/mwifiex/pcie.c +++ b/drivers/net/wireless/marvell/mwifiex/pcie.c @@ -37,17 +37,6 @@ static struct mwifiex_if_ops pcie_ops; static struct semaphore add_remove_card_sem; -static struct memory_type_mapping mem_type_mapping_tbl[] = { - {"ITCM", NULL, 0, 0xF0}, - {"DTCM", NULL, 0, 0xF1}, - {"SQRAM", NULL, 0, 0xF2}, - {"IRAM", NULL, 0, 0xF3}, - {"APU", NULL, 0, 0xF4}, - {"CIU", NULL, 0, 0xF5}, - {"ICU", NULL, 0, 0xF6}, - {"MAC", NULL, 0, 0xF7}, -}; - static int mwifiex_map_pci_memory(struct mwifiex_adapter *adapter, struct sk_buff *skb, size_t size, int flags) @@ -206,6 +195,8 @@ static int mwifiex_pcie_probe(struct pci_dev *pdev, card->pcie.blksz_fw_dl = data->blksz_fw_dl; card->pcie.tx_buf_size = data->tx_buf_size; card->pcie.can_dump_fw = data->can_dump_fw; + card->pcie.mem_type_mapping_tbl = data->mem_type_mapping_tbl; + card->pcie.num_mem_types = data->num_mem_types; card->pcie.can_ext_scan = data->can_ext_scan; } @@ -323,6 +314,8 @@ static int mwifiex_read_reg(struct mwifiex_adapter *adapter, int reg, u32 *data) struct pcie_service_card *card = adapter->card; *data = ioread32(card->pci_mmap1 + reg); + if (*data == 0xffffffff) + return 
0xffffffff; return 0; } @@ -2007,14 +2000,12 @@ done: /* * This function checks the firmware status in card. - * - * The winner interface is also determined by this function. */ static int mwifiex_check_fw_status(struct mwifiex_adapter *adapter, u32 poll_num) { int ret = 0; - u32 firmware_stat, winner_status; + u32 firmware_stat; struct pcie_service_card *card = adapter->card; const struct mwifiex_pcie_card_reg *reg = card->pcie.reg; u32 tries; @@ -2054,19 +2045,28 @@ mwifiex_check_fw_status(struct mwifiex_adapter *adapter, u32 poll_num) } } - if (ret) { - if (mwifiex_read_reg(adapter, reg->fw_status, - &winner_status)) - ret = -1; - else if (!winner_status) { - mwifiex_dbg(adapter, INFO, - "PCI-E is the winner\n"); - adapter->winner = 1; - } else { - mwifiex_dbg(adapter, ERROR, - "PCI-E is not the winner <%#x,%d>, exit dnld\n", - ret, adapter->winner); - } + return ret; +} + +/* This function checks if WLAN is the winner. + */ +static int +mwifiex_check_winner_status(struct mwifiex_adapter *adapter) +{ + u32 winner = 0; + int ret = 0; + struct pcie_service_card *card = adapter->card; + const struct mwifiex_pcie_card_reg *reg = card->pcie.reg; + + if (mwifiex_read_reg(adapter, reg->fw_status, &winner)) { + ret = -1; + } else if (!winner) { + mwifiex_dbg(adapter, INFO, "PCI-E is the winner\n"); + adapter->winner = 1; + } else { + mwifiex_dbg(adapter, ERROR, + "PCI-E is not the winner <%#x,%d>, exit dnld\n", + ret, adapter->winner); } return ret; @@ -2075,20 +2075,28 @@ mwifiex_check_fw_status(struct mwifiex_adapter *adapter, u32 poll_num) /* * This function reads the interrupt status from card. */ -static void mwifiex_interrupt_status(struct mwifiex_adapter *adapter) +static void mwifiex_interrupt_status(struct mwifiex_adapter *adapter, + int msg_id) { u32 pcie_ireg; unsigned long flags; + struct pcie_service_card *card = adapter->card; if (!mwifiex_pcie_ok_to_access_hw(adapter)) return; - if (mwifiex_read_reg(adapter, PCIE_HOST_INT_STATUS, &pcie_ireg)) { - mwifiex_dbg(adapter, ERROR, "Read register failed\n"); - return; - } + if (card->msix_enable && msg_id >= 0) { + pcie_ireg = BIT(msg_id); + } else { + if (mwifiex_read_reg(adapter, PCIE_HOST_INT_STATUS, + &pcie_ireg)) { + mwifiex_dbg(adapter, ERROR, "Read register failed\n"); + return; + } + + if ((pcie_ireg == 0xFFFFFFFF) || !pcie_ireg) + return; - if ((pcie_ireg != 0xFFFFFFFF) && (pcie_ireg)) { mwifiex_pcie_disable_host_int(adapter); @@ -2099,21 +2107,24 @@ static void mwifiex_interrupt_status(struct mwifiex_adapter *adapter) "Write register failed\n"); return; } - spin_lock_irqsave(&adapter->int_lock, flags); - adapter->int_status |= pcie_ireg; - spin_unlock_irqrestore(&adapter->int_lock, flags); - - if (!adapter->pps_uapsd_mode && - adapter->ps_state == PS_STATE_SLEEP && - mwifiex_pcie_ok_to_access_hw(adapter)) { - /* Potentially for PCIe we could get other - * interrupts like shared. Don't change power - * state until cookie is set */ - adapter->ps_state = PS_STATE_AWAKE; - adapter->pm_wakeup_fw_try = false; - del_timer(&adapter->wakeup_timer); - } } + + if (!adapter->pps_uapsd_mode && + adapter->ps_state == PS_STATE_SLEEP && + mwifiex_pcie_ok_to_access_hw(adapter)) { + /* Potentially for PCIe we could get other + * interrupts like shared. 
Don't change power + * state until cookie is set + */ + adapter->ps_state = PS_STATE_AWAKE; + adapter->pm_wakeup_fw_try = false; + del_timer(&adapter->wakeup_timer); + } + + spin_lock_irqsave(&adapter->int_lock, flags); + adapter->int_status |= pcie_ireg; + spin_unlock_irqrestore(&adapter->int_lock, flags); + mwifiex_dbg(adapter, INTR, "ireg: 0x%08x\n", pcie_ireg); } /* @@ -2124,7 +2135,8 @@ static void mwifiex_interrupt_status(struct mwifiex_adapter *adapter) */ static irqreturn_t mwifiex_pcie_interrupt(int irq, void *context) { - struct pci_dev *pdev = (struct pci_dev *)context; + struct mwifiex_msix_context *ctx = context; + struct pci_dev *pdev = ctx->dev; struct pcie_service_card *card; struct mwifiex_adapter *adapter; @@ -2144,7 +2156,11 @@ static irqreturn_t mwifiex_pcie_interrupt(int irq, void *context) if (adapter->surprise_removed) goto exit; - mwifiex_interrupt_status(adapter); + if (card->msix_enable) + mwifiex_interrupt_status(adapter, ctx->msg_id); + else + mwifiex_interrupt_status(adapter, -1); + mwifiex_queue_main_work(adapter); exit: @@ -2164,7 +2180,7 @@ exit: * In case of Rx packets received, the packets are uploaded from card to * host and processed accordingly. */ -static int mwifiex_process_int_status(struct mwifiex_adapter *adapter) +static int mwifiex_process_pcie_int(struct mwifiex_adapter *adapter) { int ret; u32 pcie_ireg; @@ -2244,6 +2260,69 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter) return 0; } +static int mwifiex_process_msix_int(struct mwifiex_adapter *adapter) +{ + int ret; + u32 pcie_ireg; + unsigned long flags; + + spin_lock_irqsave(&adapter->int_lock, flags); + /* Clear out unused interrupts */ + pcie_ireg = adapter->int_status; + adapter->int_status = 0; + spin_unlock_irqrestore(&adapter->int_lock, flags); + + if (pcie_ireg & HOST_INTR_DNLD_DONE) { + mwifiex_dbg(adapter, INTR, + "info: TX DNLD Done\n"); + ret = mwifiex_pcie_send_data_complete(adapter); + if (ret) + return ret; + } + if (pcie_ireg & HOST_INTR_UPLD_RDY) { + mwifiex_dbg(adapter, INTR, + "info: Rx DATA\n"); + ret = mwifiex_pcie_process_recv_data(adapter); + if (ret) + return ret; + } + if (pcie_ireg & HOST_INTR_EVENT_RDY) { + mwifiex_dbg(adapter, INTR, + "info: Rx EVENT\n"); + ret = mwifiex_pcie_process_event_ready(adapter); + if (ret) + return ret; + } + + if (pcie_ireg & HOST_INTR_CMD_DONE) { + if (adapter->cmd_sent) { + mwifiex_dbg(adapter, INTR, + "info: CMD sent Interrupt\n"); + adapter->cmd_sent = false; + } + /* Handle command response */ + ret = mwifiex_pcie_process_cmd_complete(adapter); + if (ret) + return ret; + } + + mwifiex_dbg(adapter, INTR, + "info: cmd_sent=%d data_sent=%d\n", + adapter->cmd_sent, adapter->data_sent); + + return 0; +} + +static int mwifiex_process_int_status(struct mwifiex_adapter *adapter) +{ + struct pcie_service_card *card = adapter->card; + + if (card->msix_enable) + return mwifiex_process_msix_int(adapter); + else + return mwifiex_process_pcie_int(adapter); +} + /* * This function downloads data from driver to card. 
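A note on the interrupt rework above: with MSI-X every vector carries its own context cookie, so the shared handler can tell which status bit to synthesize without reading PCIE_HOST_INT_STATUS in the hot path, while the legacy/MSI case keeps a single context with msg_id == -1. The core of the pattern, reduced to a standalone sketch (the example_ names are illustrative):

#include <linux/interrupt.h>
#include <linux/pci.h>

struct example_irq_ctx {
	struct pci_dev *dev;
	int msg_id;	/* MSI-X vector index, or -1 for legacy/MSI */
};

static irqreturn_t example_isr(int irq, void *context)
{
	struct example_irq_ctx *ctx = context;

	/* With MSI-X the vector identifies the event source directly;
	 * only the msg_id == -1 path needs a status register read.
	 */
	pr_debug("irq %d fired for message %d\n", irq, ctx->msg_id);
	return IRQ_HANDLED;
}

Whatever cookie is handed to request_irq() must be handed back to free_irq(), which is why the per-vector contexts, rather than the pci_dev, appear in both calls in the hunks that follow.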
* @@ -2278,10 +2357,15 @@ mwifiex_pcie_rdwr_firmware(struct mwifiex_adapter *adapter, u8 doneflag) { int ret, tries; u8 ctrl_data; + u32 fw_status; struct pcie_service_card *card = adapter->card; const struct mwifiex_pcie_card_reg *reg = card->pcie.reg; - ret = mwifiex_write_reg(adapter, reg->fw_dump_ctrl, FW_DUMP_HOST_READY); + if (mwifiex_read_reg(adapter, reg->fw_status, &fw_status)) + return RDWR_STATUS_FAILURE; + + ret = mwifiex_write_reg(adapter, reg->fw_dump_ctrl, + reg->fw_dump_host_ready); if (ret) { mwifiex_dbg(adapter, ERROR, "PCIE write err\n"); @@ -2294,11 +2378,11 @@ mwifiex_pcie_rdwr_firmware(struct mwifiex_adapter *adapter, u8 doneflag) return RDWR_STATUS_SUCCESS; if (doneflag && ctrl_data == doneflag) return RDWR_STATUS_DONE; - if (ctrl_data != FW_DUMP_HOST_READY) { + if (ctrl_data != reg->fw_dump_host_ready) { mwifiex_dbg(adapter, WARN, "The ctrl reg was changed, re-try again!\n"); ret = mwifiex_write_reg(adapter, reg->fw_dump_ctrl, - FW_DUMP_HOST_READY); + reg->fw_dump_host_ready); if (ret) { mwifiex_dbg(adapter, ERROR, "PCIE write err\n"); @@ -2318,7 +2402,8 @@ static void mwifiex_pcie_fw_dump(struct mwifiex_adapter *adapter) struct pcie_service_card *card = adapter->card; const struct mwifiex_pcie_card_reg *creg = card->pcie.reg; unsigned int reg, reg_start, reg_end; - u8 *dbg_ptr, *end_ptr, dump_num, idx, i, read_reg, doneflag = 0; + u8 *dbg_ptr, *end_ptr, *tmp_ptr, fw_dump_num, dump_num; + u8 idx, i, read_reg, doneflag = 0; enum rdwr_status stat; u32 memory_size; int ret; @@ -2326,8 +2411,9 @@ static void mwifiex_pcie_fw_dump(struct mwifiex_adapter *adapter) if (!card->pcie.can_dump_fw) return; - for (idx = 0; idx < ARRAY_SIZE(mem_type_mapping_tbl); idx++) { - struct memory_type_mapping *entry = &mem_type_mapping_tbl[idx]; + for (idx = 0; idx < adapter->num_mem_types; idx++) { + struct memory_type_mapping *entry = + &adapter->mem_type_mapping_tbl[idx]; if (entry->mem_ptr) { vfree(entry->mem_ptr); @@ -2336,7 +2422,7 @@ static void mwifiex_pcie_fw_dump(struct mwifiex_adapter *adapter) entry->mem_size = 0; } - mwifiex_dbg(adapter, DUMP, "== mwifiex firmware dump start ==\n"); + mwifiex_dbg(adapter, MSG, "== mwifiex firmware dump start ==\n"); /* Read the number of the memories which will dump */ stat = mwifiex_pcie_rdwr_firmware(adapter, doneflag); @@ -2344,28 +2430,38 @@ static void mwifiex_pcie_fw_dump(struct mwifiex_adapter *adapter) return; reg = creg->fw_dump_start; - mwifiex_read_reg_byte(adapter, reg, &dump_num); + mwifiex_read_reg_byte(adapter, reg, &fw_dump_num); + + /* The W8997 chipset firmware dump is restored in a single region */ + if (fw_dump_num == 0) + dump_num = 1; + else + dump_num = fw_dump_num; /* Read the length of every memory which will dump */ for (idx = 0; idx < dump_num; idx++) { - struct memory_type_mapping *entry = &mem_type_mapping_tbl[idx]; - - stat = mwifiex_pcie_rdwr_firmware(adapter, doneflag); - if (stat == RDWR_STATUS_FAILURE) - return; - + struct memory_type_mapping *entry = + &adapter->mem_type_mapping_tbl[idx]; memory_size = 0; - reg = creg->fw_dump_start; - for (i = 0; i < 4; i++) { - mwifiex_read_reg_byte(adapter, reg, &read_reg); - memory_size |= (read_reg << (i * 8)); - reg++; + if (fw_dump_num != 0) { + stat = mwifiex_pcie_rdwr_firmware(adapter, doneflag); + if (stat == RDWR_STATUS_FAILURE) + return; + + reg = creg->fw_dump_start; + for (i = 0; i < 4; i++) { + mwifiex_read_reg_byte(adapter, reg, &read_reg); + memory_size |= (read_reg << (i * 8)); + reg++; + } + } else { + memory_size = MWIFIEX_FW_DUMP_MAX_MEMSIZE; } if
(memory_size == 0) { mwifiex_dbg(adapter, MSG, "Firmware dump Finished!\n"); ret = mwifiex_write_reg(adapter, creg->fw_dump_ctrl, - FW_DUMP_READ_DONE); + creg->fw_dump_read_done); if (ret) { mwifiex_dbg(adapter, ERROR, "PCIE write err\n"); return; @@ -2400,11 +2496,21 @@ static void mwifiex_pcie_fw_dump(struct mwifiex_adapter *adapter) mwifiex_read_reg_byte(adapter, reg, dbg_ptr); if (dbg_ptr < end_ptr) { dbg_ptr++; - } else { - mwifiex_dbg(adapter, ERROR, - "Allocated buf not enough\n"); - return; + continue; } + mwifiex_dbg(adapter, ERROR, + "pre-allocated buf not enough\n"); + tmp_ptr = + vzalloc(memory_size + MWIFIEX_SIZE_4K); + if (!tmp_ptr) + return; + memcpy(tmp_ptr, entry->mem_ptr, memory_size); + vfree(entry->mem_ptr); + entry->mem_ptr = tmp_ptr; + tmp_ptr = NULL; + dbg_ptr = entry->mem_ptr + memory_size; + memory_size += MWIFIEX_SIZE_4K; + end_ptr = entry->mem_ptr + memory_size; } if (stat != RDWR_STATUS_DONE) @@ -2416,7 +2522,7 @@ static void mwifiex_pcie_fw_dump(struct mwifiex_adapter *adapter) break; } while (true); } - mwifiex_dbg(adapter, DUMP, "== mwifiex firmware dump end ==\n"); + mwifiex_dbg(adapter, MSG, "== mwifiex firmware dump end ==\n"); } static void mwifiex_pcie_device_dump_work(struct mwifiex_adapter *adapter) @@ -2595,10 +2701,43 @@ static void mwifiex_pcie_cleanup(struct mwifiex_adapter *adapter) static int mwifiex_pcie_request_irq(struct mwifiex_adapter *adapter) { - int ret; + int ret, i, j; struct pcie_service_card *card = adapter->card; struct pci_dev *pdev = card->dev; + if (card->pcie.reg->msix_support) { + for (i = 0; i < MWIFIEX_NUM_MSIX_VECTORS; i++) + card->msix_entries[i].entry = i; + ret = pci_enable_msix_exact(pdev, card->msix_entries, + MWIFIEX_NUM_MSIX_VECTORS); + if (!ret) { + for (i = 0; i < MWIFIEX_NUM_MSIX_VECTORS; i++) { + card->msix_ctx[i].dev = pdev; + card->msix_ctx[i].msg_id = i; + + ret = request_irq(card->msix_entries[i].vector, + mwifiex_pcie_interrupt, 0, + "MWIFIEX_PCIE_MSIX", + &card->msix_ctx[i]); + if (ret) + break; + } + + if (ret) { + mwifiex_dbg(adapter, INFO, "request_irq fail: %d\n", + ret); + for (j = 0; j < i; j++) + free_irq(card->msix_entries[j].vector, + &card->msix_ctx[j]); + pci_disable_msix(pdev); + } else { + mwifiex_dbg(adapter, MSG, "MSIx enabled!"); + card->msix_enable = 1; + return 0; + } + } + } + if (pci_enable_msi(pdev) != 0) pci_disable_msi(pdev); else @@ -2606,8 +2745,10 @@ static int mwifiex_pcie_request_irq(struct mwifiex_adapter *adapter) mwifiex_dbg(adapter, INFO, "msi_enable = %d\n", card->msi_enable); + card->share_irq_ctx.dev = pdev; + card->share_irq_ctx.msg_id = -1; ret = request_irq(pdev->irq, mwifiex_pcie_interrupt, IRQF_SHARED, - "MRVL_PCIE", pdev); + "MRVL_PCIE", &card->share_irq_ctx); if (ret) { pr_err("request_irq failed: ret=%d\n", ret); adapter->card = NULL; @@ -2635,8 +2776,8 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter) return -1; adapter->tx_buf_size = card->pcie.tx_buf_size; - adapter->mem_type_mapping_tbl = mem_type_mapping_tbl; - adapter->num_mem_types = ARRAY_SIZE(mem_type_mapping_tbl); + adapter->mem_type_mapping_tbl = card->pcie.mem_type_mapping_tbl; + adapter->num_mem_types = card->pcie.num_mem_types; strcpy(adapter->fw_name, card->pcie.firmware); adapter->ext_scan = card->pcie.can_ext_scan; @@ -2653,11 +2794,28 @@ static void mwifiex_unregister_dev(struct mwifiex_adapter *adapter) { struct pcie_service_card *card = adapter->card; const struct mwifiex_pcie_card_reg *reg; + struct pci_dev *pdev = card->dev; + int i; if (card) { - mwifiex_dbg(adapter, INFO, -
"%s(): calling free_irq()\n", __func__); - free_irq(card->dev->irq, card->dev); + if (card->msix_enable) { + for (i = 0; i < MWIFIEX_NUM_MSIX_VECTORS; i++) + synchronize_irq(card->msix_entries[i].vector); + + for (i = 0; i < MWIFIEX_NUM_MSIX_VECTORS; i++) + free_irq(card->msix_entries[i].vector, + &card->msix_ctx[i]); + + card->msix_enable = 0; + pci_disable_msix(pdev); + } else { + mwifiex_dbg(adapter, INFO, + "%s(): calling free_irq()\n", __func__); + free_irq(card->dev->irq, &card->share_irq_ctx); + + if (card->msi_enable) + pci_disable_msi(pdev); + } reg = card->pcie.reg; if (reg->sleep_cookie) @@ -2675,6 +2833,7 @@ static struct mwifiex_if_ops pcie_ops = { .init_if = mwifiex_pcie_init, .cleanup_if = mwifiex_pcie_cleanup, .check_fw_status = mwifiex_check_fw_status, + .check_winner_status = mwifiex_check_winner_status, .prog_fw = mwifiex_prog_fw_w_helper, .register_dev = mwifiex_register_dev, .unregister_dev = mwifiex_unregister_dev, diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.h b/drivers/net/wireless/marvell/mwifiex/pcie.h index 6fc28737b576..29e58ce877e3 100644 --- a/drivers/net/wireless/marvell/mwifiex/pcie.h +++ b/drivers/net/wireless/marvell/mwifiex/pcie.h @@ -26,6 +26,7 @@ #include <linux/pcieport_if.h> #include <linux/interrupt.h> +#include "decl.h" #include "main.h" #define PCIE8766_DEFAULT_FW_NAME "mrvl/pcie8766_uapsta.bin" @@ -135,6 +136,9 @@ struct mwifiex_pcie_card_reg { u16 fw_dump_ctrl; u16 fw_dump_start; u16 fw_dump_end; + u8 fw_dump_host_ready; + u8 fw_dump_read_done; + u8 msix_support; }; static const struct mwifiex_pcie_card_reg mwifiex_reg_8766 = { @@ -166,6 +170,7 @@ static const struct mwifiex_pcie_card_reg mwifiex_reg_8766 = { .ring_tx_start_ptr = 0, .pfu_enabled = 0, .sleep_cookie = 1, + .msix_support = 0, }; static const struct mwifiex_pcie_card_reg mwifiex_reg_8897 = { @@ -200,6 +205,9 @@ static const struct mwifiex_pcie_card_reg mwifiex_reg_8897 = { .fw_dump_ctrl = 0xcf4, .fw_dump_start = 0xcf8, .fw_dump_end = 0xcff, + .fw_dump_host_ready = 0xee, + .fw_dump_read_done = 0xfe, + .msix_support = 0, }; static const struct mwifiex_pcie_card_reg mwifiex_reg_8997 = { @@ -231,6 +239,27 @@ static const struct mwifiex_pcie_card_reg mwifiex_reg_8997 = { .ring_tx_start_ptr = MWIFIEX_BD_FLAG_TX_START_PTR, .pfu_enabled = 1, .sleep_cookie = 0, + .fw_dump_ctrl = 0xcf4, + .fw_dump_start = 0xcf8, + .fw_dump_end = 0xcff, + .fw_dump_host_ready = 0xcc, + .fw_dump_read_done = 0xdd, + .msix_support = 1, +}; + +static struct memory_type_mapping mem_type_mapping_tbl_w8897[] = { + {"ITCM", NULL, 0, 0xF0}, + {"DTCM", NULL, 0, 0xF1}, + {"SQRAM", NULL, 0, 0xF2}, + {"IRAM", NULL, 0, 0xF3}, + {"APU", NULL, 0, 0xF4}, + {"CIU", NULL, 0, 0xF5}, + {"ICU", NULL, 0, 0xF6}, + {"MAC", NULL, 0, 0xF7}, +}; + +static struct memory_type_mapping mem_type_mapping_tbl_w8997[] = { + {"DUMP", NULL, 0, 0xDD}, }; struct mwifiex_pcie_device { @@ -239,6 +268,8 @@ struct mwifiex_pcie_device { u16 blksz_fw_dl; u16 tx_buf_size; bool can_dump_fw; + struct memory_type_mapping *mem_type_mapping_tbl; + u8 num_mem_types; bool can_ext_scan; }; @@ -257,6 +288,8 @@ static const struct mwifiex_pcie_device mwifiex_pcie8897 = { .blksz_fw_dl = MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD, .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K, .can_dump_fw = true, + .mem_type_mapping_tbl = mem_type_mapping_tbl_w8897, + .num_mem_types = ARRAY_SIZE(mem_type_mapping_tbl_w8897), .can_ext_scan = true, }; @@ -265,7 +298,9 @@ static const struct mwifiex_pcie_device mwifiex_pcie8997 = { .reg = &mwifiex_reg_8997, .blksz_fw_dl = 
MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD, .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K, - .can_dump_fw = false, + .can_dump_fw = true, + .mem_type_mapping_tbl = mem_type_mapping_tbl_w8997, + .num_mem_types = ARRAY_SIZE(mem_type_mapping_tbl_w8997), .can_ext_scan = true, }; @@ -290,6 +325,13 @@ struct mwifiex_pfu_buf_desc { u32 reserved; } __packed; +#define MWIFIEX_NUM_MSIX_VECTORS 4 + +struct mwifiex_msix_context { + struct pci_dev *dev; + u16 msg_id; +}; + struct pcie_service_card { struct pci_dev *dev; struct mwifiex_adapter *adapter; @@ -327,6 +369,12 @@ struct pcie_service_card { void __iomem *pci_mmap; void __iomem *pci_mmap1; int msi_enable; + int msix_enable; +#ifdef CONFIG_PCI + struct msix_entry msix_entries[MWIFIEX_NUM_MSIX_VECTORS]; +#endif + struct mwifiex_msix_context msix_ctx[MWIFIEX_NUM_MSIX_VECTORS]; + struct mwifiex_msix_context share_irq_ctx; }; static inline int diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c index c20017ced566..489f7a911a83 100644 --- a/drivers/net/wireless/marvell/mwifiex/scan.c +++ b/drivers/net/wireless/marvell/mwifiex/scan.c @@ -547,6 +547,61 @@ mwifiex_scan_create_channel_list(struct mwifiex_private *priv, return chan_idx; } +/* This function creates a channel list tlv for bgscan config, based + * on region/band information. + */ +static int +mwifiex_bgscan_create_channel_list(struct mwifiex_private *priv, + const struct mwifiex_bg_scan_cfg + *bgscan_cfg_in, + struct mwifiex_chan_scan_param_set + *scan_chan_list) +{ + enum ieee80211_band band; + struct ieee80211_supported_band *sband; + struct ieee80211_channel *ch; + struct mwifiex_adapter *adapter = priv->adapter; + int chan_idx = 0, i; + + for (band = 0; (band < IEEE80211_NUM_BANDS); band++) { + if (!priv->wdev.wiphy->bands[band]) + continue; + + sband = priv->wdev.wiphy->bands[band]; + + for (i = 0; (i < sband->n_channels) ; i++) { + ch = &sband->channels[i]; + if (ch->flags & IEEE80211_CHAN_DISABLED) + continue; + scan_chan_list[chan_idx].radio_type = band; + + if (bgscan_cfg_in->chan_list[0].scan_time) + scan_chan_list[chan_idx].max_scan_time = + cpu_to_le16((u16)bgscan_cfg_in-> + chan_list[0].scan_time); + else if (ch->flags & IEEE80211_CHAN_NO_IR) + scan_chan_list[chan_idx].max_scan_time = + cpu_to_le16(adapter->passive_scan_time); + else + scan_chan_list[chan_idx].max_scan_time = + cpu_to_le16(adapter-> + specific_scan_time); + + if (ch->flags & IEEE80211_CHAN_NO_IR) + scan_chan_list[chan_idx].chan_scan_mode_bitmap + |= MWIFIEX_PASSIVE_SCAN; + else + scan_chan_list[chan_idx].chan_scan_mode_bitmap + &= ~MWIFIEX_PASSIVE_SCAN; + + scan_chan_list[chan_idx].chan_number = + (u32)ch->hw_value; + chan_idx++; + } + } + return chan_idx; +} + /* This function appends rate TLV to scan config command. 
*/ static int mwifiex_append_rate_tlv(struct mwifiex_private *priv, @@ -2037,6 +2092,8 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv, u8 is_bgscan_resp; __le64 fw_tsf = 0; u8 *radio_type; + struct cfg80211_wowlan_nd_match *pmatch; + struct cfg80211_sched_scan_request *nd_config = NULL; is_bgscan_resp = (le16_to_cpu(resp->command) == HostCmd_CMD_802_11_BG_SCAN_QUERY); @@ -2099,6 +2156,21 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv, (struct mwifiex_ie_types_data **) &chan_band_tlv); +#ifdef CONFIG_PM + if (priv->wdev.wiphy->wowlan_config) + nd_config = priv->wdev.wiphy->wowlan_config->nd_config; +#endif + + if (nd_config) { + adapter->nd_info = + kzalloc(sizeof(struct cfg80211_wowlan_nd_match) + + sizeof(struct cfg80211_wowlan_nd_match *) * + scan_rsp->number_of_sets, GFP_ATOMIC); + + if (adapter->nd_info) + adapter->nd_info->n_matches = scan_rsp->number_of_sets; + } + for (idx = 0; idx < scan_rsp->number_of_sets && bytes_left; idx++) { /* * If the TSF TLV was appended to the scan results, save this @@ -2117,6 +2189,23 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv, radio_type = NULL; } + if (chan_band_tlv && adapter->nd_info) { + adapter->nd_info->matches[idx] = + kzalloc(sizeof(*pmatch) + + sizeof(u32), GFP_ATOMIC); + + pmatch = adapter->nd_info->matches[idx]; + + if (pmatch) { + memset(pmatch, 0, sizeof(*pmatch)); + if (chan_band_tlv) { + pmatch->n_channels = 1; + pmatch->channels[0] = + chan_band->chan_number; + } + } + } + ret = mwifiex_parse_single_response_buf(priv, &bss_info, &bytes_left, le64_to_cpu(fw_tsf), @@ -2155,6 +2244,227 @@ int mwifiex_cmd_802_11_scan_ext(struct mwifiex_private *priv, return 0; } +/* This function prepares a background scan config command to be sent + * to the firmware. + */ +int mwifiex_cmd_802_11_bg_scan_config(struct mwifiex_private *priv, + struct host_cmd_ds_command *cmd, + void *data_buf) +{ + struct host_cmd_ds_802_11_bg_scan_config *bgscan_config = + &cmd->params.bg_scan_config; + struct mwifiex_bg_scan_cfg *bgscan_cfg_in = data_buf; + u8 *tlv_pos = bgscan_config->tlv; + u8 num_probes; + u32 ssid_len, chan_idx, scan_type, scan_dur, chan_num; + int i; + struct mwifiex_ie_types_num_probes *num_probes_tlv; + struct mwifiex_ie_types_repeat_count *repeat_count_tlv; + struct mwifiex_ie_types_min_rssi_threshold *rssi_threshold_tlv; + struct mwifiex_ie_types_bgscan_start_later *start_later_tlv; + struct mwifiex_ie_types_wildcard_ssid_params *wildcard_ssid_tlv; + struct mwifiex_ie_types_chan_list_param_set *chan_list_tlv; + struct mwifiex_chan_scan_param_set *temp_chan; + + cmd->command = cpu_to_le16(HostCmd_CMD_802_11_BG_SCAN_CONFIG); + cmd->size = cpu_to_le16(sizeof(*bgscan_config) + S_DS_GEN); + + bgscan_config->action = cpu_to_le16(bgscan_cfg_in->action); + bgscan_config->enable = bgscan_cfg_in->enable; + bgscan_config->bss_type = bgscan_cfg_in->bss_type; + bgscan_config->scan_interval = + cpu_to_le32(bgscan_cfg_in->scan_interval); + bgscan_config->report_condition = + cpu_to_le32(bgscan_cfg_in->report_condition); + + /* stop sched scan */ + if (!bgscan_config->enable) + return 0; + + bgscan_config->chan_per_scan = bgscan_cfg_in->chan_per_scan; + + num_probes = (bgscan_cfg_in->num_probes ?
bgscan_cfg_in-> + num_probes : priv->adapter->scan_probes); + + if (num_probes) { + num_probes_tlv = (struct mwifiex_ie_types_num_probes *)tlv_pos; + num_probes_tlv->header.type = cpu_to_le16(TLV_TYPE_NUMPROBES); + num_probes_tlv->header.len = + cpu_to_le16(sizeof(num_probes_tlv->num_probes)); + num_probes_tlv->num_probes = cpu_to_le16((u16)num_probes); + + tlv_pos += sizeof(num_probes_tlv->header) + + le16_to_cpu(num_probes_tlv->header.len); + } + + if (bgscan_cfg_in->repeat_count) { + repeat_count_tlv = + (struct mwifiex_ie_types_repeat_count *)tlv_pos; + repeat_count_tlv->header.type = + cpu_to_le16(TLV_TYPE_REPEAT_COUNT); + repeat_count_tlv->header.len = + cpu_to_le16(sizeof(repeat_count_tlv->repeat_count)); + repeat_count_tlv->repeat_count = + cpu_to_le16(bgscan_cfg_in->repeat_count); + + tlv_pos += sizeof(repeat_count_tlv->header) + + le16_to_cpu(repeat_count_tlv->header.len); + } + + if (bgscan_cfg_in->rssi_threshold) { + rssi_threshold_tlv = + (struct mwifiex_ie_types_min_rssi_threshold *)tlv_pos; + rssi_threshold_tlv->header.type = + cpu_to_le16(TLV_TYPE_RSSI_LOW); + rssi_threshold_tlv->header.len = + cpu_to_le16(sizeof(rssi_threshold_tlv->rssi_threshold)); + rssi_threshold_tlv->rssi_threshold = + cpu_to_le16(bgscan_cfg_in->rssi_threshold); + + tlv_pos += sizeof(rssi_threshold_tlv->header) + + le16_to_cpu(rssi_threshold_tlv->header.len); + } + + for (i = 0; i < bgscan_cfg_in->num_ssids; i++) { + ssid_len = bgscan_cfg_in->ssid_list[i].ssid.ssid_len; + + wildcard_ssid_tlv = + (struct mwifiex_ie_types_wildcard_ssid_params *)tlv_pos; + wildcard_ssid_tlv->header.type = + cpu_to_le16(TLV_TYPE_WILDCARDSSID); + wildcard_ssid_tlv->header.len = cpu_to_le16( + (u16)(ssid_len + sizeof(wildcard_ssid_tlv-> + max_ssid_length))); + + /* max_ssid_length = 0 tells firmware to perform + * specific scan for the SSID filled, whereas + * max_ssid_length = IEEE80211_MAX_SSID_LEN is for + * wildcard scan. + */ + if (ssid_len) + wildcard_ssid_tlv->max_ssid_length = 0; + else + wildcard_ssid_tlv->max_ssid_length = + IEEE80211_MAX_SSID_LEN; + + memcpy(wildcard_ssid_tlv->ssid, + bgscan_cfg_in->ssid_list[i].ssid.ssid, ssid_len); + + tlv_pos += (sizeof(wildcard_ssid_tlv->header) + + le16_to_cpu(wildcard_ssid_tlv->header.len)); + } + + chan_list_tlv = (struct mwifiex_ie_types_chan_list_param_set *)tlv_pos; + + if (bgscan_cfg_in->chan_list[0].chan_number) { + dev_dbg(priv->adapter->dev, "info: bgscan: Using supplied channel list\n"); + + chan_list_tlv->header.type = cpu_to_le16(TLV_TYPE_CHANLIST); + + for (chan_idx = 0; + chan_idx < MWIFIEX_BG_SCAN_CHAN_MAX && + bgscan_cfg_in->chan_list[chan_idx].chan_number; + chan_idx++) { + temp_chan = chan_list_tlv->chan_scan_param + chan_idx; + + /* Increment the TLV header length by size appended */ + le16_add_cpu(&chan_list_tlv->header.len, + sizeof(chan_list_tlv->chan_scan_param)); + + temp_chan->chan_number = + bgscan_cfg_in->chan_list[chan_idx].chan_number; + temp_chan->radio_type = + bgscan_cfg_in->chan_list[chan_idx].radio_type; + + scan_type = + bgscan_cfg_in->chan_list[chan_idx].scan_type; + + if (scan_type == MWIFIEX_SCAN_TYPE_PASSIVE) + temp_chan->chan_scan_mode_bitmap + |= MWIFIEX_PASSIVE_SCAN; + else + temp_chan->chan_scan_mode_bitmap + &= ~MWIFIEX_PASSIVE_SCAN; + + if (bgscan_cfg_in->chan_list[chan_idx].scan_time) { + scan_dur = (u16)bgscan_cfg_in-> + chan_list[chan_idx].scan_time; + } else { + scan_dur = (scan_type == + MWIFIEX_SCAN_TYPE_PASSIVE) ? 
+ priv->adapter->passive_scan_time : + priv->adapter->specific_scan_time; + } + + temp_chan->min_scan_time = cpu_to_le16(scan_dur); + temp_chan->max_scan_time = cpu_to_le16(scan_dur); + } + } else { + dev_dbg(priv->adapter->dev, + "info: bgscan: Creating full region channel list\n"); + chan_num = + mwifiex_bgscan_create_channel_list(priv, bgscan_cfg_in, + chan_list_tlv-> + chan_scan_param); + le16_add_cpu(&chan_list_tlv->header.len, + chan_num * + sizeof(chan_list_tlv->chan_scan_param[0])); + } + + tlv_pos += (sizeof(chan_list_tlv->header) + + le16_to_cpu(chan_list_tlv->header.len)); + + if (bgscan_cfg_in->start_later) { + start_later_tlv = + (struct mwifiex_ie_types_bgscan_start_later *)tlv_pos; + start_later_tlv->header.type = + cpu_to_le16(TLV_TYPE_BGSCAN_START_LATER); + start_later_tlv->header.len = + cpu_to_le16(sizeof(start_later_tlv->start_later)); + start_later_tlv->start_later = + cpu_to_le16(bgscan_cfg_in->start_later); + + tlv_pos += sizeof(start_later_tlv->header) + + le16_to_cpu(start_later_tlv->header.len); + } + + /* Append vendor specific IE TLV */ + mwifiex_cmd_append_vsie_tlv(priv, MWIFIEX_VSIE_MASK_BGSCAN, &tlv_pos); + + le16_add_cpu(&cmd->size, tlv_pos - bgscan_config->tlv); + + return 0; +} + +int mwifiex_stop_bg_scan(struct mwifiex_private *priv) +{ + struct mwifiex_bg_scan_cfg *bgscan_cfg; + + if (!priv->sched_scanning) { + dev_dbg(priv->adapter->dev, "bgscan already stopped!\n"); + return 0; + } + + bgscan_cfg = kzalloc(sizeof(*bgscan_cfg), GFP_KERNEL); + if (!bgscan_cfg) + return -ENOMEM; + + bgscan_cfg->bss_type = MWIFIEX_BSS_MODE_INFRA; + bgscan_cfg->action = MWIFIEX_BGSCAN_ACT_SET; + bgscan_cfg->enable = false; + + if (mwifiex_send_cmd(priv, HostCmd_CMD_802_11_BG_SCAN_CONFIG, + HostCmd_ACT_GEN_SET, 0, bgscan_cfg, true)) { + kfree(bgscan_cfg); + return -EFAULT; + } + + kfree(bgscan_cfg); + priv->sched_scanning = false; + + return 0; +} + static void mwifiex_update_chan_statistics(struct mwifiex_private *priv, struct mwifiex_ietypes_chanstats *tlv_stat) diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c index 4c8cae682c89..abf15dbdfe08 100644 --- a/drivers/net/wireless/marvell/mwifiex/sdio.c +++ b/drivers/net/wireless/marvell/mwifiex/sdio.c @@ -181,7 +181,7 @@ static int mwifiex_sdio_resume(struct device *dev) /* Disable Host Sleep */ mwifiex_cancel_hs(mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA), - MWIFIEX_ASYNC_CMD); + MWIFIEX_SYNC_CMD); return 0; } @@ -1039,19 +1039,14 @@ done: /* * This function checks the firmware status in card. - * - * The winner interface is also determined by this function. */ static int mwifiex_check_fw_status(struct mwifiex_adapter *adapter, u32 poll_num) { - struct sdio_mmc_card *card = adapter->card; int ret = 0; u16 firmware_stat; u32 tries; - u8 winner_status; - /* Wait for firmware initialization event */ for (tries = 0; tries < poll_num; tries++) { ret = mwifiex_sdio_read_fw_status(adapter, &firmware_stat); if (ret) @@ -1065,16 +1060,25 @@ static int mwifiex_check_fw_status(struct mwifiex_adapter *adapter, } } - if (ret) { - if (mwifiex_read_reg - (adapter, card->reg->status_reg_0, &winner_status)) - winner_status = 0; + return ret; +} + +/* This function checks if WLAN is the winner. 
+ */ +static int mwifiex_check_winner_status(struct mwifiex_adapter *adapter) +{ + int ret = 0; + u8 winner = 0; + struct sdio_mmc_card *card = adapter->card; + + if (mwifiex_read_reg(adapter, card->reg->status_reg_0, &winner)) + return -1; + + if (winner) + adapter->winner = 0; + else + adapter->winner = 1; - if (winner_status) - adapter->winner = 0; - else - adapter->winner = 1; - } return ret; } @@ -2620,6 +2624,7 @@ static struct mwifiex_if_ops sdio_ops = { .init_if = mwifiex_init_sdio, .cleanup_if = mwifiex_cleanup_sdio, .check_fw_status = mwifiex_check_fw_status, + .check_winner_status = mwifiex_check_winner_status, .prog_fw = mwifiex_prog_fw_w_helper, .register_dev = mwifiex_register_dev, .unregister_dev = mwifiex_unregister_dev, diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c index e486867a4c67..30f152601c57 100644 --- a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c +++ b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c @@ -1813,6 +1813,22 @@ static int mwifiex_cmd_sdio_rx_aggr_cfg(struct host_cmd_ds_command *cmd, return 0; } +/* This function prepares command to get HS wakeup reason. + * + * Preparation includes - + * - Setting command ID, action and proper size + * - Ensuring correct endian-ness + */ +static int mwifiex_cmd_get_wakeup_reason(struct mwifiex_private *priv, + struct host_cmd_ds_command *cmd) +{ + cmd->command = cpu_to_le16(HostCmd_CMD_HS_WAKEUP_REASON); + cmd->size = cpu_to_le16(sizeof(struct host_cmd_ds_wakeup_reason) + + S_DS_GEN); + + return 0; +} + /* * This function prepares the commands before sending them to the firmware. * @@ -1873,6 +1889,10 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no, case HostCmd_CMD_802_11_SCAN: ret = mwifiex_cmd_802_11_scan(cmd_ptr, data_buf); break; + case HostCmd_CMD_802_11_BG_SCAN_CONFIG: + ret = mwifiex_cmd_802_11_bg_scan_config(priv, cmd_ptr, + data_buf); + break; case HostCmd_CMD_802_11_BG_SCAN_QUERY: ret = mwifiex_cmd_802_11_bg_scan_query(cmd_ptr); break; @@ -2063,6 +2083,9 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no, ret = mwifiex_cmd_sdio_rx_aggr_cfg(cmd_ptr, cmd_action, data_buf); break; + case HostCmd_CMD_HS_WAKEUP_REASON: + ret = mwifiex_cmd_get_wakeup_reason(priv, cmd_ptr); + break; case HostCmd_CMD_MC_POLICY: ret = mwifiex_cmd_set_mc_policy(priv, cmd_ptr, cmd_action, data_buf); diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c index 9ac7aa2431b4..d96523e10eb4 100644 --- a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c +++ b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c @@ -1076,9 +1076,12 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no, break; case HostCmd_CMD_802_11_BG_SCAN_QUERY: ret = mwifiex_ret_802_11_scan(priv, resp); + cfg80211_sched_scan_results(priv->wdev.wiphy); mwifiex_dbg(adapter, CMD, "info: CMD_RESP: BG_SCAN result is ready!\n"); break; + case HostCmd_CMD_802_11_BG_SCAN_CONFIG: + break; case HostCmd_CMD_TXPWR_CFG: ret = mwifiex_ret_tx_power_cfg(priv, resp); break; @@ -1233,6 +1236,9 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no, case HostCmd_CMD_SDIO_SP_RX_AGGR_CFG: ret = mwifiex_ret_sdio_rx_aggr_cfg(priv, resp); break; + case HostCmd_CMD_HS_WAKEUP_REASON: + ret = mwifiex_ret_wakeup_reason(priv, resp, data_buf); + break; case HostCmd_CMD_TDLS_CONFIG: break; case HostCmd_CMD_ROBUST_COEX: diff --git 
a/drivers/net/wireless/marvell/mwifiex/sta_event.c b/drivers/net/wireless/marvell/mwifiex/sta_event.c index ff3ee9dfbbd5..070bce401151 100644 --- a/drivers/net/wireless/marvell/mwifiex/sta_event.c +++ b/drivers/net/wireless/marvell/mwifiex/sta_event.c @@ -92,6 +92,9 @@ mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason_code) priv->is_data_rate_auto = true; priv->data_rate = 0; + priv->assoc_resp_ht_param = 0; + priv->ht_param_present = false; + if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA || GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP) && priv->hist_data) mwifiex_hist_data_reset(priv); @@ -607,11 +610,13 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv) case EVENT_PS_AWAKE: mwifiex_dbg(adapter, EVENT, "info: EVENT: AWAKE\n"); - if (!adapter->pps_uapsd_mode && priv->port_open && + if (!adapter->pps_uapsd_mode && + (priv->port_open || + (priv->bss_mode == NL80211_IFTYPE_ADHOC)) && priv->media_connected && adapter->sleep_period.period) { - adapter->pps_uapsd_mode = true; - mwifiex_dbg(adapter, EVENT, - "event: PPS/UAPSD mode activated\n"); + adapter->pps_uapsd_mode = true; + mwifiex_dbg(adapter, EVENT, + "event: PPS/UAPSD mode activated\n"); } adapter->tx_lock_flag = false; if (adapter->pps_uapsd_mode && adapter->gen_null_pkt) { @@ -686,6 +691,13 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv) HostCmd_ACT_GEN_GET, 0, NULL, false); break; + case EVENT_BG_SCAN_STOPPED: + dev_dbg(adapter->dev, "event: BGS_STOPPED\n"); + cfg80211_sched_scan_stopped(priv->wdev.wiphy); + if (priv->sched_scanning) + priv->sched_scanning = false; + break; + case EVENT_PORT_RELEASE: mwifiex_dbg(adapter, EVENT, "event: PORT RELEASE\n"); priv->port_open = true; diff --git a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c index 6a4fc5d183cf..5cbee58f8781 100644 --- a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c +++ b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c @@ -504,6 +504,20 @@ int mwifiex_enable_hs(struct mwifiex_adapter *adapter) } } + priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA); + + if (priv && priv->sched_scanning) { +#ifdef CONFIG_PM + if (!priv->wdev.wiphy->wowlan_config->nd_config) { +#endif + mwifiex_dbg(adapter, CMD, "aborting bgscan!\n"); + mwifiex_stop_bg_scan(priv); + cfg80211_sched_scan_stopped(priv->wdev.wiphy); +#ifdef CONFIG_PM + } +#endif + } + if (adapter->hs_activated) { mwifiex_dbg(adapter, CMD, "cmd: HS Already activated\n"); @@ -1114,11 +1128,12 @@ int mwifiex_set_encode(struct mwifiex_private *priv, struct key_params *kp, * with requisite parameters and calls the IOCTL handler. */ int -mwifiex_get_ver_ext(struct mwifiex_private *priv) +mwifiex_get_ver_ext(struct mwifiex_private *priv, u32 version_str_sel) { struct mwifiex_ver_ext ver_ext; memset(&ver_ext, 0, sizeof(struct host_cmd_ds_version_ext)); + ver_ext.version_str_sel = version_str_sel; if (mwifiex_send_cmd(priv, HostCmd_CMD_VERSION_EXT, HostCmd_ACT_GEN_GET, 0, &ver_ext, true)) return -1; @@ -1450,3 +1465,19 @@ mwifiex_set_gen_ie(struct mwifiex_private *priv, const u8 *ie, int ie_len) return 0; } + +/* This function get Host Sleep wake up reason. 
+ * + */ +int mwifiex_get_wakeup_reason(struct mwifiex_private *priv, u16 action, + int cmd_type, + struct mwifiex_ds_wakeup_reason *wakeup_reason) +{ + int status = 0; + + status = mwifiex_send_cmd(priv, HostCmd_CMD_HS_WAKEUP_REASON, + HostCmd_ACT_GEN_GET, 0, wakeup_reason, + cmd_type == MWIFIEX_SYNC_CMD); + + return status; +} diff --git a/drivers/net/wireless/marvell/mwifiex/wmm.c b/drivers/net/wireless/marvell/mwifiex/wmm.c index acccd6734e3b..0eb246502e1d 100644 --- a/drivers/net/wireless/marvell/mwifiex/wmm.c +++ b/drivers/net/wireless/marvell/mwifiex/wmm.c @@ -438,6 +438,7 @@ mwifiex_wmm_init(struct mwifiex_adapter *adapter) mwifiex_set_ba_params(priv); mwifiex_reset_11n_rx_seq_num(priv); + priv->wmm.drv_pkt_delay_max = MWIFIEX_WMM_DRV_DELAY_MAX; atomic_set(&priv->wmm.tx_pkts_queued, 0); atomic_set(&priv->wmm.highest_queued_prio, HIGH_PRIO_TID); } @@ -475,7 +476,8 @@ mwifiex_wmm_lists_empty(struct mwifiex_adapter *adapter) priv = adapter->priv[i]; if (!priv) continue; - if (!priv->port_open) + if (!priv->port_open && + (priv->bss_mode != NL80211_IFTYPE_ADHOC)) continue; if (adapter->if_ops.is_port_ready && !adapter->if_ops.is_port_ready(priv)) @@ -1099,7 +1101,8 @@ mwifiex_wmm_get_highest_priolist_ptr(struct mwifiex_adapter *adapter, priv_tmp = adapter->bss_prio_tbl[j].bss_prio_cur->priv; - if (!priv_tmp->port_open || + if (((priv_tmp->bss_mode != NL80211_IFTYPE_ADHOC) && + !priv_tmp->port_open) || (atomic_read(&priv_tmp->wmm.tx_pkts_queued) == 0)) continue; diff --git a/drivers/net/wireless/ralink/rt2x00/rt2400pci.c b/drivers/net/wireless/ralink/rt2x00/rt2400pci.c index 9a3966cd6fbe..155f343981fe 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2400pci.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2400pci.c @@ -273,8 +273,10 @@ static void rt2400pci_config_filter(struct rt2x00_dev *rt2x00dev, !(filter_flags & FIF_PLCPFAIL)); rt2x00_set_field32(®, RXCSR0_DROP_CONTROL, !(filter_flags & FIF_CONTROL)); - rt2x00_set_field32(®, RXCSR0_DROP_NOT_TO_ME, 1); + rt2x00_set_field32(®, RXCSR0_DROP_NOT_TO_ME, + !test_bit(CONFIG_MONITORING, &rt2x00dev->flags)); rt2x00_set_field32(®, RXCSR0_DROP_TODS, + !test_bit(CONFIG_MONITORING, &rt2x00dev->flags) && !rt2x00dev->intf_ap_count); rt2x00_set_field32(®, RXCSR0_DROP_VERSION_ERROR, 1); rt2x00mmio_register_write(rt2x00dev, RXCSR0, reg); diff --git a/drivers/net/wireless/ralink/rt2x00/rt2500pci.c b/drivers/net/wireless/ralink/rt2x00/rt2500pci.c index 1a6740b4d396..2553cdd74066 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2500pci.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2500pci.c @@ -274,8 +274,10 @@ static void rt2500pci_config_filter(struct rt2x00_dev *rt2x00dev, !(filter_flags & FIF_PLCPFAIL)); rt2x00_set_field32(®, RXCSR0_DROP_CONTROL, !(filter_flags & FIF_CONTROL)); - rt2x00_set_field32(®, RXCSR0_DROP_NOT_TO_ME, 1); + rt2x00_set_field32(®, RXCSR0_DROP_NOT_TO_ME, + !test_bit(CONFIG_MONITORING, &rt2x00dev->flags)); rt2x00_set_field32(®, RXCSR0_DROP_TODS, + !test_bit(CONFIG_MONITORING, &rt2x00dev->flags) && !rt2x00dev->intf_ap_count); rt2x00_set_field32(®, RXCSR0_DROP_VERSION_ERROR, 1); rt2x00_set_field32(®, RXCSR0_DROP_MCAST, diff --git a/drivers/net/wireless/ralink/rt2x00/rt2500usb.c b/drivers/net/wireless/ralink/rt2x00/rt2500usb.c index d26018f30b7d..2d64611de300 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2500usb.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2500usb.c @@ -437,8 +437,10 @@ static void rt2500usb_config_filter(struct rt2x00_dev *rt2x00dev, !(filter_flags & FIF_PLCPFAIL)); rt2x00_set_field16(®, TXRX_CSR2_DROP_CONTROL, 
!(filter_flags & FIF_CONTROL)); - rt2x00_set_field16(®, TXRX_CSR2_DROP_NOT_TO_ME, 1); + rt2x00_set_field16(®, TXRX_CSR2_DROP_NOT_TO_ME, + !test_bit(CONFIG_MONITORING, &rt2x00dev->flags)); rt2x00_set_field16(®, TXRX_CSR2_DROP_TODS, + !test_bit(CONFIG_MONITORING, &rt2x00dev->flags) && !rt2x00dev->intf_ap_count); rt2x00_set_field16(®, TXRX_CSR2_DROP_VERSION_ERROR, 1); rt2x00_set_field16(®, TXRX_CSR2_DROP_MULTICAST, diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c index 9733b31a780d..a26afcab03ed 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c @@ -1490,7 +1490,8 @@ void rt2800_config_filter(struct rt2x00_dev *rt2x00dev, !(filter_flags & FIF_FCSFAIL)); rt2x00_set_field32(®, RX_FILTER_CFG_DROP_PHY_ERROR, !(filter_flags & FIF_PLCPFAIL)); - rt2x00_set_field32(®, RX_FILTER_CFG_DROP_NOT_TO_ME, 1); + rt2x00_set_field32(®, RX_FILTER_CFG_DROP_NOT_TO_ME, + !test_bit(CONFIG_MONITORING, &rt2x00dev->flags)); rt2x00_set_field32(®, RX_FILTER_CFG_DROP_NOT_MY_BSSD, 0); rt2x00_set_field32(®, RX_FILTER_CFG_DROP_VER_ERROR, 1); rt2x00_set_field32(®, RX_FILTER_CFG_DROP_MULTICAST, diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00.h b/drivers/net/wireless/ralink/rt2x00/rt2x00.h index 3282ddb766f4..6418620f95ff 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00.h +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00.h @@ -107,7 +107,7 @@ * amount of bytes needed to move the data. */ #define ALIGN_SIZE(__skb, __header) \ - ( ((unsigned long)((__skb)->data + (__header))) & 3 ) + (((unsigned long)((__skb)->data + (__header))) & 3) /* * Constants for extra TX headroom for alignment purposes. @@ -128,14 +128,14 @@ #define SLOT_TIME 20 #define SHORT_SLOT_TIME 9 #define SIFS 10 -#define PIFS ( SIFS + SLOT_TIME ) -#define SHORT_PIFS ( SIFS + SHORT_SLOT_TIME ) -#define DIFS ( PIFS + SLOT_TIME ) -#define SHORT_DIFS ( SHORT_PIFS + SHORT_SLOT_TIME ) -#define EIFS ( SIFS + DIFS + \ - GET_DURATION(IEEE80211_HEADER + ACK_SIZE, 10) ) -#define SHORT_EIFS ( SIFS + SHORT_DIFS + \ - GET_DURATION(IEEE80211_HEADER + ACK_SIZE, 10) ) +#define PIFS (SIFS + SLOT_TIME) +#define SHORT_PIFS (SIFS + SHORT_SLOT_TIME) +#define DIFS (PIFS + SLOT_TIME) +#define SHORT_DIFS (SHORT_PIFS + SHORT_SLOT_TIME) +#define EIFS (SIFS + DIFS + \ + GET_DURATION(IEEE80211_HEADER + ACK_SIZE, 10)) +#define SHORT_EIFS (SIFS + SHORT_DIFS + \ + GET_DURATION(IEEE80211_HEADER + ACK_SIZE, 10)) enum rt2x00_chip_intf { RT2X00_CHIP_INTF_PCI, @@ -669,6 +669,7 @@ enum rt2x00_state_flags { CONFIG_POWERSAVING, CONFIG_HT_DISABLED, CONFIG_QOS_DISABLED, + CONFIG_MONITORING, /* * Mark we currently are sequentially reading TX_STA_FIFO register diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00config.c b/drivers/net/wireless/ralink/rt2x00/rt2x00config.c index 7e8bb1198ae9..6a1f508d472f 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00config.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00config.c @@ -277,6 +277,11 @@ void rt2x00lib_config(struct rt2x00_dev *rt2x00dev, else clear_bit(CONFIG_POWERSAVING, &rt2x00dev->flags); + if (conf->flags & IEEE80211_CONF_MONITOR) + set_bit(CONFIG_MONITORING, &rt2x00dev->flags); + else + clear_bit(CONFIG_MONITORING, &rt2x00dev->flags); + rt2x00dev->curr_band = conf->chandef.chan->band; rt2x00dev->curr_freq = conf->chandef.chan->center_freq; rt2x00dev->tx_power = conf->power_level; diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00debug.c b/drivers/net/wireless/ralink/rt2x00/rt2x00debug.c index 
90fdb02b55e7..25ee3cb8e982 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00debug.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00debug.c @@ -629,7 +629,7 @@ static struct dentry *rt2x00debug_create_file_chipset(const char *name, data += sprintf(data, "register\tbase\twords\twordsize\n"); #define RT2X00DEBUGFS_SPRINTF_REGISTER(__name) \ { \ - if(debug->__name.read) \ + if (debug->__name.read) \ data += sprintf(data, __stringify(__name) \ "\t%d\t%d\t%d\n", \ debug->__name.word_base, \ @@ -699,7 +699,7 @@ void rt2x00debug_register(struct rt2x00_dev *rt2x00dev) #define RT2X00DEBUGFS_CREATE_REGISTER_ENTRY(__intf, __name) \ ({ \ - if(debug->__name.read) { \ + if (debug->__name.read) { \ (__intf)->__name##_off_entry = \ debugfs_create_u32(__stringify(__name) "_offset", \ S_IRUSR | S_IWUSR, \ diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c index 3c26ee65a415..13da95a24cf7 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c @@ -385,11 +385,6 @@ void rt2x00mac_configure_filter(struct ieee80211_hw *hw, *total_flags |= FIF_PSPOLL; } - /* - * Check if there is any work left for us. - */ - if (rt2x00dev->packet_filter == *total_flags) - return; rt2x00dev->packet_filter = *total_flags; rt2x00dev->ops->lib->config_filter(rt2x00dev, *total_flags); diff --git a/drivers/net/wireless/ralink/rt2x00/rt61pci.c b/drivers/net/wireless/ralink/rt2x00/rt61pci.c index c0e730ea1b69..24a3436ef952 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt61pci.c +++ b/drivers/net/wireless/ralink/rt2x00/rt61pci.c @@ -530,8 +530,10 @@ static void rt61pci_config_filter(struct rt2x00_dev *rt2x00dev, !(filter_flags & FIF_PLCPFAIL)); rt2x00_set_field32(®, TXRX_CSR0_DROP_CONTROL, !(filter_flags & (FIF_CONTROL | FIF_PSPOLL))); - rt2x00_set_field32(®, TXRX_CSR0_DROP_NOT_TO_ME, 1); + rt2x00_set_field32(®, TXRX_CSR0_DROP_NOT_TO_ME, + !test_bit(CONFIG_MONITORING, &rt2x00dev->flags)); rt2x00_set_field32(®, TXRX_CSR0_DROP_TO_DS, + !test_bit(CONFIG_MONITORING, &rt2x00dev->flags) && !rt2x00dev->intf_ap_count); rt2x00_set_field32(®, TXRX_CSR0_DROP_VERSION_ERROR, 1); rt2x00_set_field32(®, TXRX_CSR0_DROP_MULTICAST, diff --git a/drivers/net/wireless/ralink/rt2x00/rt61pci.h b/drivers/net/wireless/ralink/rt2x00/rt61pci.h index 1442075a8382..ab8641547a1f 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt61pci.h +++ b/drivers/net/wireless/ralink/rt2x00/rt61pci.h @@ -138,14 +138,14 @@ #define PAIRWISE_TA_TABLE_BASE 0x1a00 #define SHARED_KEY_ENTRY(__idx) \ - ( SHARED_KEY_TABLE_BASE + \ - ((__idx) * sizeof(struct hw_key_entry)) ) + (SHARED_KEY_TABLE_BASE + \ + ((__idx) * sizeof(struct hw_key_entry))) #define PAIRWISE_KEY_ENTRY(__idx) \ - ( PAIRWISE_KEY_TABLE_BASE + \ - ((__idx) * sizeof(struct hw_key_entry)) ) + (PAIRWISE_KEY_TABLE_BASE + \ + ((__idx) * sizeof(struct hw_key_entry))) #define PAIRWISE_TA_ENTRY(__idx) \ - ( PAIRWISE_TA_TABLE_BASE + \ - ((__idx) * sizeof(struct hw_pairwise_ta_entry)) ) + (PAIRWISE_TA_TABLE_BASE + \ + ((__idx) * sizeof(struct hw_pairwise_ta_entry))) struct hw_key_entry { u8 key[16]; @@ -180,7 +180,7 @@ struct hw_pairwise_ta_entry { #define HW_BEACON_BASE3 0x2f00 #define HW_BEACON_OFFSET(__index) \ - ( HW_BEACON_BASE0 + (__index * 0x0100) ) + (HW_BEACON_BASE0 + (__index * 0x0100)) /* * HOST-MCU shared memory. @@ -1287,9 +1287,9 @@ struct hw_pairwise_ta_entry { /* * DMA descriptor defines. 
*/ -#define TXD_DESC_SIZE ( 16 * sizeof(__le32) ) -#define TXINFO_SIZE ( 6 * sizeof(__le32) ) -#define RXD_DESC_SIZE ( 16 * sizeof(__le32) ) +#define TXD_DESC_SIZE (16 * sizeof(__le32)) +#define TXINFO_SIZE (6 * sizeof(__le32)) +#define RXD_DESC_SIZE (16 * sizeof(__le32)) /* * TX descriptor format for TX, PRIO and Beacon Ring. diff --git a/drivers/net/wireless/ralink/rt2x00/rt73usb.c b/drivers/net/wireless/ralink/rt2x00/rt73usb.c index 7081e13b4fd6..7bbc86931168 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt73usb.c +++ b/drivers/net/wireless/ralink/rt2x00/rt73usb.c @@ -480,8 +480,10 @@ static void rt73usb_config_filter(struct rt2x00_dev *rt2x00dev, !(filter_flags & FIF_PLCPFAIL)); rt2x00_set_field32(®, TXRX_CSR0_DROP_CONTROL, !(filter_flags & (FIF_CONTROL | FIF_PSPOLL))); - rt2x00_set_field32(®, TXRX_CSR0_DROP_NOT_TO_ME, 1); + rt2x00_set_field32(®, TXRX_CSR0_DROP_NOT_TO_ME, + !test_bit(CONFIG_MONITORING, &rt2x00dev->flags)); rt2x00_set_field32(®, TXRX_CSR0_DROP_TO_DS, + !test_bit(CONFIG_MONITORING, &rt2x00dev->flags) && !rt2x00dev->intf_ap_count); rt2x00_set_field32(®, TXRX_CSR0_DROP_VERSION_ERROR, 1); rt2x00_set_field32(®, TXRX_CSR0_DROP_MULTICAST, diff --git a/drivers/net/wireless/realtek/rtl8xxxu/Kconfig b/drivers/net/wireless/realtek/rtl8xxxu/Kconfig index dd4d626aecbc..8f053c350227 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/Kconfig +++ b/drivers/net/wireless/realtek/rtl8xxxu/Kconfig @@ -13,7 +13,7 @@ config RTL8XXXU This driver is under development and has a limited feature set. In particular it does not yet support 40MHz channels and power management. However it should have a smaller - memory footprint than the vendor drivers and benetifs + memory footprint than the vendor drivers and benefits from the in kernel mac80211 stack. It can coexist with drivers from drivers/staging/rtl8723au, diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 6aed923a709a..fdeae3bada4d 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -1599,9 +1599,9 @@ rtl8723a_set_tx_power(struct rtl8xxxu_priv *priv, int channel, bool ht40) static void rtl8xxxu_set_linktype(struct rtl8xxxu_priv *priv, enum nl80211_iftype linktype) { - u16 val8; + u8 val8; - val8 = rtl8xxxu_read16(priv, REG_MSR); + val8 = rtl8xxxu_read8(priv, REG_MSR); val8 &= ~MSR_LINKTYPE_MASK; switch (linktype) { @@ -1704,6 +1704,7 @@ static int rtl8xxxu_identify_chip(struct rtl8xxxu_priv *priv) priv->has_bluetooth = 1; if (val32 & MULTI_GPS_FUNC_EN) priv->has_gps = 1; + priv->is_multi_func = 1; } else if (val32 & SYS_CFG_TYPE_ID) { bonding = rtl8xxxu_read32(priv, REG_HPON_FSM); bonding &= HPON_FSM_BONDING_MASK; @@ -1938,9 +1939,11 @@ static int rtl8xxxu_read_efuse(struct rtl8xxxu_priv *priv) if (val16 & EEPROM_BOOT) priv->boot_eeprom = 1; - val32 = rtl8xxxu_read32(priv, REG_EFUSE_TEST); - val32 = (val32 & ~EFUSE_SELECT_MASK) | EFUSE_WIFI_SELECT; - rtl8xxxu_write32(priv, REG_EFUSE_TEST, val32); + if (priv->is_multi_func) { + val32 = rtl8xxxu_read32(priv, REG_EFUSE_TEST); + val32 = (val32 & ~EFUSE_SELECT_MASK) | EFUSE_WIFI_SELECT; + rtl8xxxu_write32(priv, REG_EFUSE_TEST, val32); + } dev_dbg(dev, "Booting from %s\n", priv->boot_eeprom ? 
"EEPROM" : "EFUSE"); @@ -2043,6 +2046,24 @@ exit: return ret; } +static void rtl8xxxu_reset_8051(struct rtl8xxxu_priv *priv) +{ + u8 val8; + u16 sys_func; + + val8 = rtl8xxxu_read8(priv, REG_RSV_CTRL + 1); + val8 &= ~BIT(0); + rtl8xxxu_write8(priv, REG_RSV_CTRL + 1, val8); + sys_func = rtl8xxxu_read16(priv, REG_SYS_FUNC); + sys_func &= ~SYS_FUNC_CPU_ENABLE; + rtl8xxxu_write16(priv, REG_SYS_FUNC, sys_func); + val8 = rtl8xxxu_read8(priv, REG_RSV_CTRL + 1); + val8 |= BIT(0); + rtl8xxxu_write8(priv, REG_RSV_CTRL + 1, val8); + sys_func |= SYS_FUNC_CPU_ENABLE; + rtl8xxxu_write16(priv, REG_SYS_FUNC, sys_func); +} + static int rtl8xxxu_start_firmware(struct rtl8xxxu_priv *priv) { struct device *dev = &priv->udev->dev; @@ -2067,6 +2088,12 @@ static int rtl8xxxu_start_firmware(struct rtl8xxxu_priv *priv) val32 &= ~MCU_WINT_INIT_READY; rtl8xxxu_write32(priv, REG_MCU_FW_DL, val32); + /* + * Reset the 8051 in order for the firmware to start running, + * otherwise it won't come up on the 8192eu + */ + rtl8xxxu_reset_8051(priv); + /* Wait for firmware to become ready */ for (i = 0; i < RTL8XXXU_FIRMWARE_POLL_MAX; i++) { val32 = rtl8xxxu_read32(priv, REG_MCU_FW_DL); @@ -2100,19 +2127,30 @@ static int rtl8xxxu_download_firmware(struct rtl8xxxu_priv *priv) /* 8051 enable */ val16 = rtl8xxxu_read16(priv, REG_SYS_FUNC); - rtl8xxxu_write16(priv, REG_SYS_FUNC, val16 | SYS_FUNC_CPU_ENABLE); + val16 |= SYS_FUNC_CPU_ENABLE; + rtl8xxxu_write16(priv, REG_SYS_FUNC, val16); + + val8 = rtl8xxxu_read8(priv, REG_MCU_FW_DL); + if (val8 & MCU_FW_RAM_SEL) { + pr_info("do the RAM reset\n"); + rtl8xxxu_write8(priv, REG_MCU_FW_DL, 0x00); + rtl8xxxu_reset_8051(priv); + } /* MCU firmware download enable */ val8 = rtl8xxxu_read8(priv, REG_MCU_FW_DL); - rtl8xxxu_write8(priv, REG_MCU_FW_DL, val8 | MCU_FW_DL_ENABLE); + val8 |= MCU_FW_DL_ENABLE; + rtl8xxxu_write8(priv, REG_MCU_FW_DL, val8); /* 8051 reset */ val32 = rtl8xxxu_read32(priv, REG_MCU_FW_DL); - rtl8xxxu_write32(priv, REG_MCU_FW_DL, val32 & ~BIT(19)); + val32 &= ~BIT(19); + rtl8xxxu_write32(priv, REG_MCU_FW_DL, val32); /* Reset firmware download checksum */ val8 = rtl8xxxu_read8(priv, REG_MCU_FW_DL); - rtl8xxxu_write8(priv, REG_MCU_FW_DL, val8 | MCU_FW_DL_CSUM_REPORT); + val8 |= MCU_FW_DL_CSUM_REPORT; + rtl8xxxu_write8(priv, REG_MCU_FW_DL, val8); pages = priv->fw_size / RTL_FW_PAGE_SIZE; remainder = priv->fw_size % RTL_FW_PAGE_SIZE; @@ -2121,7 +2159,8 @@ static int rtl8xxxu_download_firmware(struct rtl8xxxu_priv *priv) for (i = 0; i < pages; i++) { val8 = rtl8xxxu_read8(priv, REG_MCU_FW_DL + 2) & 0xF8; - rtl8xxxu_write8(priv, REG_MCU_FW_DL + 2, val8 | i); + val8 |= i; + rtl8xxxu_write8(priv, REG_MCU_FW_DL + 2, val8); ret = rtl8xxxu_writeN(priv, REG_FW_START_ADDRESS, fwptr, RTL_FW_PAGE_SIZE); @@ -2135,7 +2174,8 @@ static int rtl8xxxu_download_firmware(struct rtl8xxxu_priv *priv) if (remainder) { val8 = rtl8xxxu_read8(priv, REG_MCU_FW_DL + 2) & 0xF8; - rtl8xxxu_write8(priv, REG_MCU_FW_DL + 2, val8 | i); + val8 |= i; + rtl8xxxu_write8(priv, REG_MCU_FW_DL + 2, val8); ret = rtl8xxxu_writeN(priv, REG_FW_START_ADDRESS, fwptr, remainder); if (ret != remainder) { @@ -2148,8 +2188,8 @@ static int rtl8xxxu_download_firmware(struct rtl8xxxu_priv *priv) fw_abort: /* MCU firmware download disable */ val16 = rtl8xxxu_read16(priv, REG_MCU_FW_DL); - rtl8xxxu_write16(priv, REG_MCU_FW_DL, - val16 & (~MCU_FW_DL_ENABLE & 0xff)); + val16 &= ~MCU_FW_DL_ENABLE; + rtl8xxxu_write16(priv, REG_MCU_FW_DL, val16); return ret; } @@ -2174,6 +2214,10 @@ static int rtl8xxxu_load_firmware(struct rtl8xxxu_priv 
*priv, char *fw_name) } priv->fw_data = kmemdup(fw->data, fw->size, GFP_KERNEL); + if (!priv->fw_data) { + ret = -ENOMEM; + goto exit; + } priv->fw_size = fw->size - sizeof(struct rtl8xxxu_firmware_header); signature = le16_to_cpu(priv->fw_data->signature); @@ -2492,8 +2536,6 @@ static int rtl8xxxu_init_rf_regs(struct rtl8xxxu_priv *priv, continue; } - reg &= 0x3f; - ret = rtl8xxxu_write_rfreg(priv, path, reg, val); if (ret) { dev_warn(&priv->udev->dev, @@ -4155,7 +4197,6 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw) * Configure initial WMAC settings */ val32 = RCR_ACCEPT_PHYS_MATCH | RCR_ACCEPT_MCAST | RCR_ACCEPT_BCAST | - /* RCR_CHECK_BSSID_MATCH | RCR_CHECK_BSSID_BEACON | */ RCR_ACCEPT_MGMT_FRAME | RCR_HTC_LOC_CTRL | RCR_APPEND_PHYSTAT | RCR_APPEND_ICV | RCR_APPEND_MIC; rtl8xxxu_write32(priv, REG_RCR, val32); @@ -4248,17 +4289,7 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw) rtl8xxxu_write16(priv, REG_FAST_EDCA_CTRL, 0); - /* - * Not sure if we should get into this at all - */ - if (priv->iqk_initialized) { - rtl8xxxu_restore_regs(priv, rtl8723au_iqk_phy_iq_bb_reg, - priv->bb_recovery_backup, - RTL8XXXU_BB_REGS); - } else { - rtl8723a_phy_iq_calibrate(priv); - priv->iqk_initialized = true; - } + rtl8723a_phy_iq_calibrate(priv); /* * This should enable thermal meter @@ -4312,14 +4343,17 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw) val8 = ((30000 + NAV_UPPER_UNIT - 1) / NAV_UPPER_UNIT); rtl8xxxu_write8(priv, REG_NAV_UPPER, val8); - /* - * 2011/03/09 MH debug only, UMC-B cut pass 2500 S5 test, - * but we need to fin root cause. - */ - val32 = rtl8xxxu_read32(priv, REG_FPGA0_RF_MODE); - if ((val32 & 0xff000000) != 0x83000000) { - val32 |= FPGA_RF_MODE_CCK; - rtl8xxxu_write32(priv, REG_FPGA0_RF_MODE, val32); + if (priv->rtlchip == 0x8723a) { + /* + * 2011/03/09 MH debug only, UMC-B cut pass 2500 S5 test, + * but we need to find root cause. + * This is 8723au only. 
+ */ + val32 = rtl8xxxu_read32(priv, REG_FPGA0_RF_MODE); + if ((val32 & 0xff000000) != 0x83000000) { + val32 |= FPGA_RF_MODE_CCK; + rtl8xxxu_write32(priv, REG_FPGA0_RF_MODE, val32); + } } val32 = rtl8xxxu_read32(priv, REG_FWHW_TXQ_CTRL); @@ -4381,7 +4415,7 @@ static void rtl8xxxu_cam_write(struct rtl8xxxu_priv *priv, } static void rtl8xxxu_sw_scan_start(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, const u8* mac) + struct ieee80211_vif *vif, const u8 *mac) { struct rtl8xxxu_priv *priv = hw->priv; u8 val8; @@ -4488,13 +4522,6 @@ rtl8xxxu_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, rtl8xxxu_update_rate_mask(priv, ramask, sgi); - val32 = rtl8xxxu_read32(priv, REG_RCR); - val32 |= RCR_CHECK_BSSID_MATCH | RCR_CHECK_BSSID_BEACON; - rtl8xxxu_write32(priv, REG_RCR, val32); - - /* Enable RX of data frames */ - rtl8xxxu_write16(priv, REG_RXFLTMAP2, 0xffff); - rtl8xxxu_write8(priv, REG_BCN_MAX_ERR, 0xff); rtl8723a_stop_tx_beacon(priv); @@ -4505,17 +4532,10 @@ rtl8xxxu_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, h2c.joinbss.data = H2C_JOIN_BSS_CONNECT; } else { - val32 = rtl8xxxu_read32(priv, REG_RCR); - val32 &= ~(RCR_CHECK_BSSID_MATCH | - RCR_CHECK_BSSID_BEACON); - rtl8xxxu_write32(priv, REG_RCR, val32); - val8 = rtl8xxxu_read8(priv, REG_BEACON_CTRL); val8 |= BEACON_DISABLE_TSF_UPDATE; rtl8xxxu_write8(priv, REG_BEACON_CTRL, val8); - /* Disable RX of data frames */ - rtl8xxxu_write16(priv, REG_RXFLTMAP2, 0x0000); h2c.joinbss.data = H2C_JOIN_BSS_DISCONNECT; } h2c.joinbss.cmd = H2C_JOIN_BSS_REPORT; @@ -5012,17 +5032,14 @@ static void rtl8xxxu_rx_complete(struct urb *urb) struct rtl8xxxu_rx_desc *rx_desc = (struct rtl8xxxu_rx_desc *)skb->data; struct rtl8723au_phy_stats *phy_stats; struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb); - struct ieee80211_mgmt *mgmt; struct device *dev = &priv->udev->dev; __le32 *_rx_desc_le = (__le32 *)skb->data; u32 *_rx_desc = (u32 *)skb->data; - int cnt, len, drvinfo_sz, desc_shift, i; + int drvinfo_sz, desc_shift, i; for (i = 0; i < (sizeof(struct rtl8xxxu_rx_desc) / sizeof(u32)); i++) _rx_desc[i] = le32_to_cpu(_rx_desc_le[i]); - cnt = rx_desc->frag; - len = rx_desc->pktlen; drvinfo_sz = rx_desc->drvinfo_sz * 8; desc_shift = rx_desc->shift; skb_put(skb, urb->actual_length); @@ -5033,8 +5050,6 @@ static void rtl8xxxu_rx_complete(struct urb *urb) skb_pull(skb, drvinfo_sz + desc_shift); - mgmt = (struct ieee80211_mgmt *)skb->data; - memset(rx_status, 0, sizeof(struct ieee80211_rx_status)); if (rx_desc->phy_stats) @@ -5284,11 +5299,56 @@ static void rtl8xxxu_configure_filter(struct ieee80211_hw *hw, unsigned int *total_flags, u64 multicast) { struct rtl8xxxu_priv *priv = hw->priv; + u32 rcr = rtl8xxxu_read32(priv, REG_RCR); dev_dbg(&priv->udev->dev, "%s: changed_flags %08x, total_flags %08x\n", __func__, changed_flags, *total_flags); - *total_flags &= (FIF_ALLMULTI | FIF_CONTROL | FIF_BCN_PRBRESP_PROMISC); + /* + * FIF_ALLMULTI ignored as all multicast frames are accepted (REG_MAR) + */ + + if (*total_flags & FIF_FCSFAIL) + rcr |= RCR_ACCEPT_CRC32; + else + rcr &= ~RCR_ACCEPT_CRC32; + + /* + * FIF_PLCPFAIL not supported? 
+ */ + + if (*total_flags & FIF_BCN_PRBRESP_PROMISC) + rcr &= ~RCR_CHECK_BSSID_BEACON; + else + rcr |= RCR_CHECK_BSSID_BEACON; + + if (*total_flags & FIF_CONTROL) + rcr |= RCR_ACCEPT_CTRL_FRAME; + else + rcr &= ~RCR_ACCEPT_CTRL_FRAME; + + if (*total_flags & FIF_OTHER_BSS) { + rcr |= RCR_ACCEPT_AP; + rcr &= ~RCR_CHECK_BSSID_MATCH; + } else { + rcr &= ~RCR_ACCEPT_AP; + rcr |= RCR_CHECK_BSSID_MATCH; + } + + if (*total_flags & FIF_PSPOLL) + rcr |= RCR_ACCEPT_PM; + else + rcr &= ~RCR_ACCEPT_PM; + + /* + * FIF_PROBE_REQ ignored as probe requests always seem to be accepted + */ + + rtl8xxxu_write32(priv, REG_RCR, rcr); + + *total_flags &= (FIF_ALLMULTI | FIF_FCSFAIL | FIF_BCN_PRBRESP_PROMISC | + FIF_CONTROL | FIF_OTHER_BSS | FIF_PSPOLL | + FIF_PROBE_REQ); } static int rtl8xxxu_set_rts_threshold(struct ieee80211_hw *hw, u32 rts) @@ -5473,12 +5533,9 @@ static int rtl8xxxu_start(struct ieee80211_hw *hw) } exit: /* - * Disable all data frames - */ - rtl8xxxu_write16(priv, REG_RXFLTMAP2, 0x0000); - /* - * Accept all mgmt frames + * Accept all data and mgmt frames */ + rtl8xxxu_write16(priv, REG_RXFLTMAP2, 0xffff); rtl8xxxu_write16(priv, REG_RXFLTMAP0, 0xffff); rtl8xxxu_write32(priv, REG_OFDM0_XA_AGC_CORE1, 0x6954341e); @@ -5891,8 +5948,6 @@ static struct usb_device_id dev_table[] = { .driver_info = (unsigned long)&rtl8192cu_fops}, {USB_DEVICE_AND_INTERFACE_INFO(0xcdab, 0x8010, 0xff, 0xff, 0xff), .driver_info = (unsigned long)&rtl8192cu_fops}, -{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x317f, 0xff, 0xff, 0xff), - .driver_info = (unsigned long)&rtl8192cu_fops}, /* Netcore 8188RU */ {USB_DEVICE_AND_INTERFACE_INFO(0x04f2, 0xaff7, 0xff, 0xff, 0xff), .driver_info = (unsigned long)&rtl8192cu_fops}, {USB_DEVICE_AND_INTERFACE_INFO(0x04f2, 0xaff9, 0xff, 0xff, 0xff), diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h index f2a1bac6c8ec..bbd0f6b76b82 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h @@ -597,6 +597,7 @@ struct rtl8xxxu_priv { struct rtl8723au_idx ht20_max_power_offset[3]; u32 chip_cut:4; u32 rom_rev:4; + u32 is_multi_func:1; u32 has_wifi:1; u32 has_bluetooth:1; u32 enable_bluetooth:1; @@ -652,7 +653,6 @@ struct rtl8xxxu_priv { u32 bb_recovery_backup[RTL8XXXU_BB_REGS]; u32 rtlchip; u8 pi_enabled:1; - u8 iqk_initialized:1; u8 int_buf[USB_INTR_CONTENT_LENGTH]; }; diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h index 23208f79b97c..8f6c9c6c7c09 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h @@ -45,6 +45,7 @@ #define APS_FSMCO_ENABLE_POWERDOWN BIT(4) #define APS_FSMCO_MAC_ENABLE BIT(8) #define APS_FSMCO_MAC_OFF BIT(9) +#define APS_FSMCO_SW_LPS BIT(10) #define APS_FSMCO_HW_SUSPEND BIT(11) #define APS_FSMCO_PCIE BIT(12) #define APS_FSMCO_HW_POWERDOWN BIT(15) @@ -213,6 +214,7 @@ #define SYS_CFG_PCIRSTB BIT(4) #define SYS_CFG_V15_VLD BIT(5) #define SYS_CFG_TRP_B15V_EN BIT(7) +#define SYS_CFG_SW_OFFLOAD_EN BIT(7) /* For chips with IOL support */ #define SYS_CFG_SIC_IDLE BIT(8) #define SYS_CFG_BD_MAC2 BIT(9) #define SYS_CFG_BD_MAC1 BIT(10) @@ -340,6 +342,11 @@ #define REG_BB_ACCEESS_CTRL 0x01e8 #define REG_BB_ACCESS_DATA 0x01ec +#define REG_HMBOX_EXT0_8723B 0x01f0 +#define REG_HMBOX_EXT1_8723B 0x01f4 +#define REG_HMBOX_EXT2_8723B 0x01f8 +#define REG_HMBOX_EXT3_8723B 0x01fc + /* 0x0200 ~ 0x027F TXDMA Configuration */ 
#define REG_RQPN 0x0200 #define RQPN_HI_PQ_SHIFT 0 @@ -352,6 +359,8 @@ #define REG_TXDMA_OFFSET_CHK 0x020c #define REG_TXDMA_STATUS 0x0210 #define REG_RQPN_NPQ 0x0214 +#define RQPN_NPQ_SHIFT 0 +#define RQPN_EPQ_SHIFT 16 /* 0x0280 ~ 0x02FF RXDMA Configuration */ #define REG_RXDMA_AGG_PG_TH 0x0280 @@ -453,6 +462,8 @@ #define REG_NEED_CPU_HANDLE 0x04e0 #define REG_PKT_LOSE_RPT 0x04e1 #define REG_PTCL_ERR_STATUS 0x04e2 +#define REG_TX_REPORT_CTRL 0x04ec +#define REG_TX_REPORT_TIME 0x04f0 #define REG_DUMMY 0x04fc /* 0x0500 ~ 0x05FF EDCA Configuration */ @@ -559,13 +570,25 @@ (Rx beacon, probe rsp) */ #define RCR_ACCEPT_CRC32 BIT(8) /* Accept CRC32 error packet */ #define RCR_ACCEPT_ICV BIT(9) /* Accept ICV error packet */ -#define RCR_ACCEPT_DATA_FRAME BIT(11) -#define RCR_ACCEPT_CTRL_FRAME BIT(12) -#define RCR_ACCEPT_MGMT_FRAME BIT(13) +#define RCR_ACCEPT_DATA_FRAME BIT(11) /* Accept all data pkt or use + REG_RXFLTMAP2 */ +#define RCR_ACCEPT_CTRL_FRAME BIT(12) /* Accept all control pkt or use + REG_RXFLTMAP1 */ +#define RCR_ACCEPT_MGMT_FRAME BIT(13) /* Accept all mgmt pkt or use + REG_RXFLTMAP0 */ #define RCR_HTC_LOC_CTRL BIT(14) /* MFC<--HTC=1 MFC-->HTC=0 */ +#define RCR_UC_DATA_PKT_INT_ENABLE BIT(16) /* Enable unicast data packet + interrupt */ +#define RCR_BM_DATA_PKT_INT_ENABLE BIT(17) /* Enable broadcast data packet + interrupt */ +#define RCR_TIM_PARSER_ENABLE BIT(18) /* Enable RX beacon TIM parser*/ #define RCR_MFBEN BIT(22) -#define RCR_LSIGEN BIT(23) +#define RCR_LSIG_ENABLE BIT(23) /* Enable LSIG TXOP Protection + function. Search KEYCAM for + each rx packet to check if + LSIGEN bit is set. */ #define RCR_MULTI_BSSID_ENABLE BIT(24) /* Enable Multiple BssId */ +#define RCR_FORCE_ACK BIT(26) #define RCR_ACCEPT_BA_SSN BIT(27) /* Accept BA SSN */ #define RCR_APPEND_PHYSTAT BIT(28) #define RCR_APPEND_ICV BIT(29) @@ -632,9 +655,20 @@ #define REG_LPNAV_CTRL 0x0694 #define REG_WKFMCAM_CMD 0x0698 #define REG_WKFMCAM_RWD 0x069c -#define REG_RXFLTMAP0 0x06a0 -#define REG_RXFLTMAP1 0x06a2 -#define REG_RXFLTMAP2 0x06a4 + +/* + * RX Filters: each bit corresponds to the numerical value of the subtype. + * If it is set the subtype frame type is passed. The filter is only used when + * the RCR_ACCEPT_DATA_FRAME, RCR_ACCEPT_CTRL_FRAME, RCR_ACCEPT_MGMT_FRAME bit + * in the RCR are low. + * + * Example: Beacon subtype is binary 1000 which is decimal 8 so we have to set + * bit 8 (0x100) in REG_RXFLTMAP0 to enable reception. 
+ */ +#define REG_RXFLTMAP0 0x06a0 /* Management frames */ +#define REG_RXFLTMAP1 0x06a2 /* Control frames */ +#define REG_RXFLTMAP2 0x06a4 /* Data frames */ + #define REG_BCN_PSR_RPT 0x06a8 #define REG_CALB32K_CTRL 0x06ac #define REG_PKT_MON_CTRL 0x06b4 @@ -742,6 +776,10 @@ #define REG_FPGA1_RF_MODE 0x0900 #define REG_FPGA1_TX_INFO 0x090c +#define REG_DPDT_CTRL 0x092c /* 8723BU */ +#define REG_RFE_CTRL_ANTA_SRC 0x0930 /* 8723BU */ +#define REG_RFE_PATH_SELECT 0x0940 /* 8723BU */ +#define REG_RFE_BUFFER 0x0944 /* 8723BU */ #define REG_CCK0_SYSTEM 0x0a00 #define CCK0_SIDEBAND BIT(4) @@ -770,6 +808,9 @@ #define REG_OFDM0_ENERGY_CCA_THRES 0x0c4c +#define REG_OFDM0_RX_D_SYNC_PATH 0x0c40 +#define OFDM0_SYNC_PATH_NOTCH_FILTER BIT(1) + #define REG_OFDM0_XA_AGC_CORE1 0x0c50 #define REG_OFDM0_XA_AGC_CORE2 0x0c54 #define REG_OFDM0_XB_AGC_CORE1 0x0c58 diff --git a/drivers/net/wireless/realtek/rtlwifi/core.c b/drivers/net/wireless/realtek/rtlwifi/core.c index 4ae421ef30d9..02eba0e2afaa 100644 --- a/drivers/net/wireless/realtek/rtlwifi/core.c +++ b/drivers/net/wireless/realtek/rtlwifi/core.c @@ -35,6 +35,22 @@ #include <linux/export.h> #include <net/cfg80211.h> +u8 channel5g[CHANNEL_MAX_NUMBER_5G] = { + 36, 38, 40, 42, 44, 46, 48, /* Band 1 */ + 52, 54, 56, 58, 60, 62, 64, /* Band 2 */ + 100, 102, 104, 106, 108, 110, 112, /* Band 3 */ + 116, 118, 120, 122, 124, 126, 128, /* Band 3 */ + 132, 134, 136, 138, 140, 142, 144, /* Band 3 */ + 149, 151, 153, 155, 157, 159, 161, /* Band 4 */ + 165, 167, 169, 171, 173, 175, 177 /* Band 4 */ +}; +EXPORT_SYMBOL(channel5g); + +u8 channel5g_80m[CHANNEL_MAX_NUMBER_5G_80M] = { + 42, 58, 106, 122, 138, 155, 171 +}; +EXPORT_SYMBOL(channel5g_80m); + void rtl_addr_delay(u32 addr) { if (addr == 0xfe) diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c index 7f471bff435c..140d2541562d 100644 --- a/drivers/net/wireless/realtek/rtlwifi/pci.c +++ b/drivers/net/wireless/realtek/rtlwifi/pci.c @@ -2392,7 +2392,6 @@ void rtl_pci_disconnect(struct pci_dev *pdev) rtlpriv->cfg->ops->deinit_sw_vars(hw); if (rtlpci->irq_alloc) { - synchronize_irq(rtlpci->pdev->irq); free_irq(rtlpci->pdev->irq, hw); rtlpci->irq_alloc = 0; } diff --git a/drivers/net/wireless/realtek/rtlwifi/regd.c b/drivers/net/wireless/realtek/rtlwifi/regd.c index a62bf0a65c32..5be34118e0af 100644 --- a/drivers/net/wireless/realtek/rtlwifi/regd.c +++ b/drivers/net/wireless/realtek/rtlwifi/regd.c @@ -351,7 +351,6 @@ static const struct ieee80211_regdomain *_rtl_regdomain_select( case COUNTRY_CODE_SPAIN: case COUNTRY_CODE_FRANCE: case COUNTRY_CODE_ISRAEL: - case COUNTRY_CODE_WORLD_WIDE_13: return &rtl_regdom_12_13; case COUNTRY_CODE_MKK: case COUNTRY_CODE_MKK1: @@ -360,6 +359,7 @@ static const struct ieee80211_regdomain *_rtl_regdomain_select( return &rtl_regdom_14_60_64; case COUNTRY_CODE_GLOBAL_DOMAIN: return &rtl_regdom_14; + case COUNTRY_CODE_WORLD_WIDE_13: case COUNTRY_CODE_WORLD_WIDE_13_5G_ALL: return &rtl_regdom_12_13_5g_all; default: diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c index bb06fe836fe7..7810fe87dca7 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c @@ -924,19 +924,11 @@ static void _rtl92d_ccxpower_index_check(struct ieee80211_hw *hw, static u8 _rtl92c_phy_get_rightchnlplace(u8 chnl) { - u8 channel_5g[59] = { - 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, - 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58, - 
60, 62, 64, 100, 102, 104, 106, 108, 110, 112, - 114, 116, 118, 120, 122, 124, 126, 128, - 130, 132, 134, 136, 138, 140, 149, 151, - 153, 155, 157, 159, 161, 163, 165 - }; u8 place = chnl; if (chnl > 14) { - for (place = 14; place < sizeof(channel_5g); place++) { - if (channel_5g[place] == chnl) { + for (place = 14; place < sizeof(channel5g); place++) { + if (channel5g[place] == chnl) { place++; break; } @@ -2471,16 +2463,9 @@ static bool _rtl92d_is_legal_5g_channel(struct ieee80211_hw *hw, u8 channel) { int i; - u8 channel_5g[45] = { - 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58, - 60, 62, 64, 100, 102, 104, 106, 108, 110, 112, - 114, 116, 118, 120, 122, 124, 126, 128, 130, 132, - 134, 136, 138, 140, 149, 151, 153, 155, 157, 159, - 161, 163, 165 - }; - for (i = 0; i < sizeof(channel_5g); i++) - if (channel == channel_5g[i]) + for (i = 0; i < sizeof(channel5g); i++) + if (channel == channel5g[i]) return true; return false; } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c index 5f14308e8eb3..9fd3f1b6e4a8 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c @@ -2018,18 +2018,6 @@ static void _rtl92ee_read_txpower_info_from_hwpg(struct ieee80211_hw *hw, struct rtl_efuse *efu = rtl_efuse(rtl_priv(hw)); struct txpower_info_2g pwr2g; struct txpower_info_5g pwr5g; - u8 channel5g[CHANNEL_MAX_NUMBER_5G] = { - 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, - 56, 58, 60, 62, 64, 100, 102, 104, 106, - 108, 110, 112, 114, 116, 118, 120, 122, - 124, 126, 128, 130, 132, 134, 136, 138, - 140, 142, 144, 149, 151, 153, 155, 157, - 159, 161, 163, 165, 167, 168, 169, 171, - 173, 175, 177 - }; - u8 channel5g_80m[CHANNEL_MAX_NUMBER_5G_80M] = { - 42, 58, 106, 122, 138, 155, 171 - }; u8 rf, idx; u8 i; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c index bbb789f8990b..5da9bd0e5002 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c @@ -2786,14 +2786,6 @@ static void _rtl8812ae_read_txpower_info_from_hwpg(struct ieee80211_hw *hw, struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); struct txpower_info_2g pwrinfo24g; struct txpower_info_5g pwrinfo5g; - u8 channel5g[CHANNEL_MAX_NUMBER_5G] = { - 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, - 56, 58, 60, 62, 64, 100, 102, 104, 106, - 108, 110, 112, 114, 116, 118, 120, 122, - 124, 126, 128, 130, 132, 134, 136, 138, - 140, 142, 144, 149, 151, 153, 155, 157, - 159, 161, 163, 165, 167, 168, 169, 171, 173, 175, 177}; - u8 channel5g_80m[CHANNEL_MAX_NUMBER_5G_80M] = {42, 58, 106, 122, 138, 155, 171}; u8 rf_path, index; u8 i; @@ -2872,16 +2864,6 @@ static void _rtl8821ae_read_txpower_info_from_hwpg(struct ieee80211_hw *hw, struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); struct txpower_info_2g pwrinfo24g; struct txpower_info_5g pwrinfo5g; - u8 channel5g[CHANNEL_MAX_NUMBER_5G] = { - 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, - 56, 58, 60, 62, 64, 100, 102, 104, 106, - 108, 110, 112, 114, 116, 118, 120, 122, - 124, 126, 128, 130, 132, 134, 136, 138, - 140, 142, 144, 149, 151, 153, 155, 157, - 159, 161, 163, 165, 167, 168, 169, 171, - 173, 175, 177}; - u8 channel5g_80m[CHANNEL_MAX_NUMBER_5G_80M] = { - 42, 58, 106, 122, 138, 155, 171}; u8 rf_path, index; u8 i; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c index 
9b4d8a637915..74165b3eb362 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c @@ -1472,18 +1472,13 @@ static char _rtl8812ae_phy_get_chnl_idx_of_txpwr_lmt(struct ieee80211_hw *hw, { struct rtl_priv *rtlpriv = rtl_priv(hw); char channel_index = -1; - u8 channel_5g[CHANNEL_MAX_NUMBER_5G] = { - 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58, 60, 62, 64, - 100, 102, 104, 106, 108, 110, 112, 114, 116, 118, 120, 122, - 124, 126, 128, 130, 132, 134, 136, 138, 140, 142, 144, 149, - 151, 153, 155, 157, 159, 161, 163, 165, 167, 168, 169, 171, - 173, 175, 177}; u8 i = 0; + if (band == BAND_ON_2_4G) channel_index = channel - 1; else if (band == BAND_ON_5G) { - for (i = 0; i < sizeof(channel_5g)/sizeof(u8); ++i) { - if (channel_5g[i] == channel) + for (i = 0; i < sizeof(channel5g)/sizeof(u8); ++i) { + if (channel5g[i] == channel) channel_index = i; } } else @@ -2240,13 +2235,6 @@ void rtl8821ae_phy_get_txpower_level(struct ieee80211_hw *hw, long *powerlevel) static bool _rtl8821ae_phy_get_chnl_index(u8 channel, u8 *chnl_index) { - u8 channel_5g[CHANNEL_MAX_NUMBER_5G] = { - 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58, 60, 62, - 64, 100, 102, 104, 106, 108, 110, 112, 114, 116, 118, - 120, 122, 124, 126, 128, 130, 132, 134, 136, 138, 140, - 142, 144, 149, 151, 153, 155, 157, 159, 161, 163, 165, - 167, 168, 169, 171, 173, 175, 177 - }; u8 i = 0; bool in_24g = true; @@ -2257,7 +2245,7 @@ static bool _rtl8821ae_phy_get_chnl_index(u8 channel, u8 *chnl_index) in_24g = false; for (i = 0; i < CHANNEL_MAX_NUMBER_5G; ++i) { - if (channel_5g[i] == channel) { + if (channel5g[i] == channel) { *chnl_index = i; return in_24g; } @@ -2728,13 +2716,10 @@ static u8 _rtl8821ae_get_txpower_index(struct ieee80211_hw *hw, u8 path, rate <= DESC_RATEVHT2SS_MCS9)) txpower += rtlefuse->txpwr_5g_bw40diff[path][TX_2S]; } else if (bandwidth == HT_CHANNEL_WIDTH_80) { - u8 channel_5g_80m[CHANNEL_MAX_NUMBER_5G_80M] = { - 42, 58, 106, 122, 138, 155, 171 - }; u8 i; - for (i = 0; i < sizeof(channel_5g_80m) / sizeof(u8); ++i) - if (channel_5g_80m[i] == channel) + for (i = 0; i < sizeof(channel5g_80m) / sizeof(u8); ++i) + if (channel5g_80m[i] == channel) index = i; if ((DESC_RATEMCS0 <= rate && rate <= DESC_RATEMCS15) || diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h index 4544752a2ba8..9e3cdd732eca 100644 --- a/drivers/net/wireless/realtek/rtlwifi/wifi.h +++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h @@ -116,17 +116,12 @@ #define CHANNEL_MAX_NUMBER (14 + 24 + 21) /* 14 is the max channel no */ #define CHANNEL_MAX_NUMBER_2G 14 -#define CHANNEL_MAX_NUMBER_5G 54 /* Please refer to +#define CHANNEL_MAX_NUMBER_5G 49 /* Please refer to *"phy_GetChnlGroup8812A" and * "Hal_ReadTxPowerInfo8812A" */ #define CHANNEL_MAX_NUMBER_5G_80M 7 #define CHANNEL_GROUP_MAX (3 + 9) /* ch1~3, 4~9, 10~14 = three groups */ -#define CHANNEL_MAX_NUMBER_5G 54 /* Please refer to - *"phy_GetChnlGroup8812A" and - * "Hal_ReadTxPowerInfo8812A" - */ -#define CHANNEL_MAX_NUMBER_5G_80M 7 #define MAX_PG_GROUP 13 #define CHANNEL_GROUP_MAX_2G 3 #define CHANNEL_GROUP_IDX_5GL 3 @@ -2904,6 +2899,10 @@ value to host byte ordering.*/ #define STBC_VHT_TEST_TX_ENABLE BIT(2) #define STBC_VHT_CAP_TX BIT(3) +extern u8 channel5g[CHANNEL_MAX_NUMBER_5G]; + +extern u8 channel5g_80m[CHANNEL_MAX_NUMBER_5G_80M]; + static inline u8 rtl_read_byte(struct rtl_priv *rtlpriv, u32 addr) { return rtlpriv->io.read8_sync(rtlpriv, addr); diff --git 
a/drivers/net/wireless/ti/wlcore/Kconfig b/drivers/net/wireless/ti/wlcore/Kconfig index 969c9d79bfc8..8a8f1e711384 100644 --- a/drivers/net/wireless/ti/wlcore/Kconfig +++ b/drivers/net/wireless/ti/wlcore/Kconfig @@ -13,7 +13,7 @@ config WLCORE config WLCORE_SPI tristate "TI wlcore SPI support" - depends on WLCORE && SPI_MASTER + depends on WLCORE && SPI_MASTER && OF select CRC7 ---help--- This module adds support for the SPI interface of adapters using diff --git a/drivers/net/wireless/ti/wlcore/debugfs.c b/drivers/net/wireless/ti/wlcore/debugfs.c index 8367f9edfb11..7f672f6879d0 100644 --- a/drivers/net/wireless/ti/wlcore/debugfs.c +++ b/drivers/net/wireless/ti/wlcore/debugfs.c @@ -1205,26 +1205,11 @@ err_out: static loff_t dev_mem_seek(struct file *file, loff_t offset, int orig) { - loff_t ret; - /* only requests of dword-aligned size and offset are supported */ if (offset % 4) return -EINVAL; - switch (orig) { - case SEEK_SET: - file->f_pos = offset; - ret = file->f_pos; - break; - case SEEK_CUR: - file->f_pos += offset; - ret = file->f_pos; - break; - default: - ret = -EINVAL; - } - - return ret; + return no_seek_end_llseek(file, offset, orig); } static const struct file_operations dev_mem_ops = { diff --git a/drivers/net/wireless/ti/wlcore/event.c b/drivers/net/wireless/ti/wlcore/event.c index c96405498bf4..4b59f67724de 100644 --- a/drivers/net/wireless/ti/wlcore/event.c +++ b/drivers/net/wireless/ti/wlcore/event.c @@ -38,7 +38,7 @@ int wlcore_event_fw_logger(struct wl1271 *wl) { - u32 ret; + int ret; struct fw_logger_information fw_log; u8 *buffer; u32 internal_fw_addrbase = WL18XX_DATA_RAM_BASE_ADDRESS; diff --git a/drivers/net/wireless/ti/wlcore/spi.c b/drivers/net/wireless/ti/wlcore/spi.c index 44f059f7f34e..020ac1a4b408 100644 --- a/drivers/net/wireless/ti/wlcore/spi.c +++ b/drivers/net/wireless/ti/wlcore/spi.c @@ -30,6 +30,8 @@ #include <linux/spi/spi.h> #include <linux/wl12xx.h> #include <linux/platform_device.h> +#include <linux/of_irq.h> +#include <linux/regulator/consumer.h> #include "wlcore.h" #include "wl12xx_80211.h" @@ -81,6 +83,7 @@ struct wl12xx_spi_glue { struct device *dev; struct platform_device *core; + struct regulator *reg; /* Power regulator */ }; static void wl12xx_spi_reset(struct device *child) @@ -318,14 +321,76 @@ static int __must_check wl12xx_spi_raw_write(struct device *child, int addr, return 0; } +/** + * wl12xx_spi_set_power - power on/off the wl12xx unit + * @child: wl12xx device handle. + * @enable: true/false to power on/off the unit. + * + * use the WiFi enable regulator to enable/disable the WiFi unit. + */ +static int wl12xx_spi_set_power(struct device *child, bool enable) +{ + int ret = 0; + struct wl12xx_spi_glue *glue = dev_get_drvdata(child->parent); + + WARN_ON(!glue->reg); + + /* Update regulator state */ + if (enable) { + ret = regulator_enable(glue->reg); + if (ret) + dev_err(child, "Power enable failure\n"); + } else { + ret = regulator_disable(glue->reg); + if (ret) + dev_err(child, "Power disable failure\n"); + } + + return ret; +} + static struct wl1271_if_operations spi_ops = { .read = wl12xx_spi_raw_read, .write = wl12xx_spi_raw_write, .reset = wl12xx_spi_reset, .init = wl12xx_spi_init, + .power = wl12xx_spi_set_power, .set_block_size = NULL, }; +static const struct of_device_id wlcore_spi_of_match_table[] = { + { .compatible = "ti,wl1271" }, + { } +}; +MODULE_DEVICE_TABLE(of, wlcore_spi_of_match_table); + +/** + * wlcore_probe_of - DT node parsing. + * @spi: SPI slave device parameters. + * @res: resource parameters. 
+ * @glue: wl12xx SPI bus to slave device glue parameters. + * @pdev_data: wlcore device parameters + */ +static int wlcore_probe_of(struct spi_device *spi, struct wl12xx_spi_glue *glue, + struct wlcore_platdev_data *pdev_data) +{ + struct device_node *dt_node = spi->dev.of_node; + int ret; + + if (of_find_property(dt_node, "clock-xtal", NULL)) + pdev_data->ref_clock_xtal = true; + + ret = of_property_read_u32(dt_node, "ref-clock-frequency", + &pdev_data->ref_clock_freq); + if (IS_ERR_VALUE(ret)) { + dev_err(glue->dev, + "can't get reference clock frequency (%d)\n", ret); + return ret; + } + + return 0; +} + static int wl1271_probe(struct spi_device *spi) { struct wl12xx_spi_glue *glue; @@ -335,8 +400,6 @@ static int wl1271_probe(struct spi_device *spi) memset(&pdev_data, 0x00, sizeof(pdev_data)); - /* TODO: add DT parsing when needed */ - pdev_data.if_ops = &spi_ops; glue = devm_kzalloc(&spi->dev, sizeof(*glue), GFP_KERNEL); @@ -353,6 +416,21 @@ static int wl1271_probe(struct spi_device *spi) * comes from the board-peripherals file */ spi->bits_per_word = 32; + glue->reg = devm_regulator_get(&spi->dev, "vwlan"); + if (PTR_ERR(glue->reg) == -EPROBE_DEFER) + return -EPROBE_DEFER; + if (IS_ERR(glue->reg)) { + dev_err(glue->dev, "can't get regulator\n"); + return PTR_ERR(glue->reg); + } + + ret = wlcore_probe_of(spi, glue, &pdev_data); + if (IS_ERR_VALUE(ret)) { + dev_err(glue->dev, + "can't get device tree parameters (%d)\n", ret); + return ret; + } + ret = spi_setup(spi); if (ret < 0) { dev_err(glue->dev, "spi_setup failed\n"); @@ -370,7 +448,7 @@ static int wl1271_probe(struct spi_device *spi) memset(res, 0x00, sizeof(res)); res[0].start = spi->irq; - res[0].flags = IORESOURCE_IRQ; + res[0].flags = IORESOURCE_IRQ | irq_get_trigger_type(spi->irq); res[0].name = "irq"; ret = platform_device_add_resources(glue->core, res, ARRAY_SIZE(res)); @@ -408,10 +486,10 @@ static int wl1271_remove(struct spi_device *spi) return 0; } - static struct spi_driver wl1271_spi_driver = { .driver = { .name = "wl1271_spi", + .of_match_table = of_match_ptr(wlcore_spi_of_match_table), }, .probe = wl1271_probe, diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h index 0333ab0fd926..112825200d41 100644 --- a/drivers/net/xen-netback/common.h +++ b/drivers/net/xen-netback/common.h @@ -251,6 +251,7 @@ struct xenvif { unsigned int stalled_queues; struct xenbus_watch credit_watch; + struct xenbus_watch mcast_ctrl_watch; spinlock_t lock; diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index e7bd63eb2876..f5231a2dd2ac 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c @@ -615,6 +615,7 @@ err_tx_unbind: queue->tx_irq = 0; err_unmap: xenvif_unmap_frontend_rings(queue); + netif_napi_del(&queue->napi); err: module_put(THIS_MODULE); return err; @@ -684,22 +685,16 @@ void xenvif_deinit_queue(struct xenvif_queue *queue) void xenvif_free(struct xenvif *vif) { - struct xenvif_queue *queue = NULL; + struct xenvif_queue *queues = vif->queues; unsigned int num_queues = vif->num_queues; unsigned int queue_index; unregister_netdev(vif->dev); - - for (queue_index = 0; queue_index < num_queues; ++queue_index) { - queue = &vif->queues[queue_index]; - xenvif_deinit_queue(queue); - } - - vfree(vif->queues); - vif->queues = NULL; - vif->num_queues = 0; - free_netdev(vif->dev); + for (queue_index = 0; queue_index < num_queues; ++queue_index) + xenvif_deinit_queue(&queues[queue_index]); + vfree(queues); + module_put(THIS_MODULE); } diff 
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 1049c34e7d43..61b97c34bb3b 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -149,20 +149,19 @@ static inline pending_ring_idx_t pending_index(unsigned i)
 	return i & (MAX_PENDING_REQS-1);
 }
 
-static int xenvif_rx_ring_slots_needed(struct xenvif *vif)
-{
-	if (vif->gso_mask)
-		return DIV_ROUND_UP(vif->dev->gso_max_size, XEN_PAGE_SIZE) + 1;
-	else
-		return DIV_ROUND_UP(vif->dev->mtu, XEN_PAGE_SIZE);
-}
-
 static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
 {
 	RING_IDX prod, cons;
+	struct sk_buff *skb;
 	int needed;
 
-	needed = xenvif_rx_ring_slots_needed(queue->vif);
+	skb = skb_peek(&queue->rx_queue);
+	if (!skb)
+		return false;
+
+	needed = DIV_ROUND_UP(skb->len, XEN_PAGE_SIZE);
+	if (skb_is_gso(skb))
+		needed++;
 
 	do {
 		prod = queue->rx.sring->req_prod;
@@ -2005,8 +2004,7 @@ static bool xenvif_rx_queue_ready(struct xenvif_queue *queue)
 
 static bool xenvif_have_rx_work(struct xenvif_queue *queue)
 {
-	return (!skb_queue_empty(&queue->rx_queue)
-		&& xenvif_rx_ring_slots_available(queue))
+	return xenvif_rx_ring_slots_available(queue)
 		|| (queue->vif->stall_timeout &&
 		    (xenvif_rx_queue_stalled(queue) ||
 		     xenvif_rx_queue_ready(queue)))
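The reworked xenvif_rx_ring_slots_available() sizes its check to the skb actually at the head of the RX queue instead of a worst case derived from gso_max_size or the MTU, so it demands far fewer free ring slots before making progress. The arithmetic is a ceiling division of the packet length by the grant-page size, plus one slot for the extra-info segment a GSO packet carries. A freestanding sketch of that calculation (XEN_PAGE_SIZE lives in the tree's Xen headers; it is redefined here only to keep the sketch self-contained, assuming 4 KiB pages, and the helper name is illustrative):

#include <linux/kernel.h>	/* DIV_ROUND_UP */
#include <linux/skbuff.h>

#define XEN_PAGE_SIZE 4096	/* assumption: 4 KiB Xen grant page */

/* Illustrative helper: ring slots needed to transfer one skb. */
static int rx_slots_for_skb(const struct sk_buff *skb)
{
	/* one slot per (partial) page of payload ... */
	int needed = DIV_ROUND_UP(skb->len, XEN_PAGE_SIZE);

	/* ... plus one for the GSO extra-info slot, if any */
	if (skb_is_gso(skb))
		needed++;

	return needed;
}

Peeking at the head skb also gives xenvif_have_rx_work() its empty-queue check for free: xenvif_rx_ring_slots_available() now returns false when there is nothing queued, which is why the skb_queue_empty() clause could be dropped.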
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index 56ebd8267386..39a303de20dd 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -327,7 +327,7 @@ static int netback_probe(struct xenbus_device *dev,
 			goto abort_transaction;
 		}
 
-		/* We support multicast-control. */
+		/* We support dynamic multicast-control. */
 		err = xenbus_printf(xbt, dev->nodename,
 				    "feature-multicast-control", "%d", 1);
 		if (err) {
@@ -335,6 +335,14 @@ static int netback_probe(struct xenbus_device *dev,
 			goto abort_transaction;
 		}
 
+		err = xenbus_printf(xbt, dev->nodename,
+				    "feature-dynamic-multicast-control",
+				    "%d", 1);
+		if (err) {
+			message = "writing feature-dynamic-multicast-control";
+			goto abort_transaction;
+		}
+
 		err = xenbus_transaction_end(xbt, 0);
 	} while (err == -EAGAIN);
 
@@ -683,7 +691,8 @@ static void xen_net_rate_changed(struct xenbus_watch *watch,
 	}
 }
 
-static int xen_register_watchers(struct xenbus_device *dev, struct xenvif *vif)
+static int xen_register_credit_watch(struct xenbus_device *dev,
+				     struct xenvif *vif)
 {
 	int err = 0;
 	char *node;
@@ -708,7 +717,7 @@ static int xen_register_watchers(struct xenbus_device *dev, struct xenvif *vif)
 	return err;
 }
 
-static void xen_unregister_watchers(struct xenvif *vif)
+static void xen_unregister_credit_watch(struct xenvif *vif)
 {
 	if (vif->credit_watch.node) {
 		unregister_xenbus_watch(&vif->credit_watch);
@@ -717,6 +726,75 @@ static void xen_unregister_watchers(struct xenvif *vif)
 	}
 }
 
+static void xen_mcast_ctrl_changed(struct xenbus_watch *watch,
+				   const char **vec, unsigned int len)
+{
+	struct xenvif *vif = container_of(watch, struct xenvif,
+					  mcast_ctrl_watch);
+	struct xenbus_device *dev = xenvif_to_xenbus_device(vif);
+	int val;
+
+	if (xenbus_scanf(XBT_NIL, dev->otherend,
+			 "request-multicast-control", "%d", &val) < 0)
+		val = 0;
+	vif->multicast_control = !!val;
+}
+
+static int xen_register_mcast_ctrl_watch(struct xenbus_device *dev,
+					 struct xenvif *vif)
+{
+	int err = 0;
+	char *node;
+	unsigned maxlen = strlen(dev->otherend) +
+		sizeof("/request-multicast-control");
+
+	if (vif->mcast_ctrl_watch.node) {
+		pr_err_ratelimited("Watch is already registered\n");
+		return -EADDRINUSE;
+	}
+
+	node = kmalloc(maxlen, GFP_KERNEL);
+	if (!node) {
+		pr_err("Failed to allocate memory for watch\n");
+		return -ENOMEM;
+	}
+	snprintf(node, maxlen, "%s/request-multicast-control",
+		 dev->otherend);
+	vif->mcast_ctrl_watch.node = node;
+	vif->mcast_ctrl_watch.callback = xen_mcast_ctrl_changed;
+	err = register_xenbus_watch(&vif->mcast_ctrl_watch);
+	if (err) {
+		pr_err("Failed to set watcher %s\n",
+		       vif->mcast_ctrl_watch.node);
+		kfree(node);
+		vif->mcast_ctrl_watch.node = NULL;
+		vif->mcast_ctrl_watch.callback = NULL;
+	}
+	return err;
+}
+
+static void xen_unregister_mcast_ctrl_watch(struct xenvif *vif)
+{
+	if (vif->mcast_ctrl_watch.node) {
+		unregister_xenbus_watch(&vif->mcast_ctrl_watch);
+		kfree(vif->mcast_ctrl_watch.node);
+		vif->mcast_ctrl_watch.node = NULL;
+	}
+}
+
+static void xen_register_watchers(struct xenbus_device *dev,
+				  struct xenvif *vif)
+{
+	xen_register_credit_watch(dev, vif);
+	xen_register_mcast_ctrl_watch(dev, vif);
+}
+
+static void xen_unregister_watchers(struct xenvif *vif)
+{
+	xen_unregister_mcast_ctrl_watch(vif);
+	xen_unregister_credit_watch(vif);
+}
+
 static void unregister_hotplug_status_watch(struct backend_info *be)
 {
 	if (be->have_hotplug_status_watch) {
@@ -1030,11 +1108,6 @@ static int read_xenbus_vif_flags(struct backend_info *be)
 		val = 0;
 	vif->ipv6_csum = !!val;
 
-	if (xenbus_scanf(XBT_NIL, dev->otherend, "request-multicast-control",
-			 "%d", &val) < 0)
-		val = 0;
-	vif->multicast_control = !!val;
-
 	return 0;
 }
 
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index d6abf191122a..96ccd4e943db 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -364,6 +364,7 @@ static void xennet_tx_buf_gc(struct netfront_queue *queue)
 	RING_IDX cons, prod;
 	unsigned short id;
 	struct sk_buff *skb;
+	bool more_to_do;
 
 	BUG_ON(!netif_carrier_ok(queue->info->netdev));
 
@@ -398,18 +399,8 @@ static void xennet_tx_buf_gc(struct netfront_queue *queue)
 
 		queue->tx.rsp_cons = prod;
 
-		/*
-		 * Set a new event, then check for race with update of tx_cons.
-		 * Note that it is essential to schedule a callback, no matter
-		 * how few buffers are pending. Even if there is space in the
-		 * transmit ring, higher layers may be blocked because too much
-		 * data is outstanding: in such cases notification from Xen is
-		 * likely to be the only kick that we'll get.
-		 */
-		queue->tx.sring->rsp_event =
-			prod + ((queue->tx.sring->req_prod - prod) >> 1) + 1;
-		mb();		/* update shared area */
-	} while ((cons == prod) && (prod != queue->tx.sring->rsp_prod));
+		RING_FINAL_CHECK_FOR_RESPONSES(&queue->tx, more_to_do);
+	} while (more_to_do);
 
 	xennet_maybe_wake_tx(queue);
 }
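In the xenbus.c additions above, xen_mcast_ctrl_changed() receives only a pointer to the embedded mcast_ctrl_watch and recovers the enclosing struct xenvif with container_of(), the standard kernel idiom for embedded-member callbacks: register the member, walk back to the object when the callback fires. A freestanding sketch of the idiom (the struct names are invented; the macro is shown in its classic pointer-arithmetic form, minus the kernel's extra type checking):

#include <stddef.h>

/* container_of, as in <linux/kernel.h> modulo type checking */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct watch {
	void (*callback)(struct watch *w);
};

struct frontend {
	int multicast_control;
	struct watch mcast_watch;	/* embedded, like mcast_ctrl_watch */
};

static void mcast_changed(struct watch *w)
{
	/* walk back from the member to the enclosing object */
	struct frontend *fe = container_of(w, struct frontend, mcast_watch);

	fe->multicast_control = 1;
}

Because the watch is embedded rather than allocated separately, its lifetime is tied to the xenvif, which is why registration and teardown only need to track the node string.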
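The xen-netfront hunk replaces hand-rolled rsp_event arithmetic with RING_FINAL_CHECK_FOR_RESPONSES(), whose whole point is the race the deleted comment described: arm the event for the next response, then look once more, so a response that arrives between the check and the arming is not left stranded until an interrupt that may never come. Paraphrased from the ring macros in xen/interface/io/ring.h (treat this as a sketch of the logic rather than the verbatim definition):

#define RING_FINAL_CHECK_FOR_RESPONSES(_r, _work_to_do) do {		\
	(_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r);		\
	if (_work_to_do)						\
		break;							\
	/* arm the event just past everything consumed so far ... */	\
	(_r)->sring->rsp_event = (_r)->rsp_cons + 1;			\
	/* ... publish it before looking again ... */			\
	mb();								\
	/* ... then re-check to close the race with a fresh response */\
	(_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r);		\
} while (0)

With more_to_do driving the do/while in xennet_tx_buf_gc(), the loop keeps consuming until a final armed-and-rechecked pass comes back empty, which is both simpler and harder to get wrong than the open-coded rsp_event computation it replaces.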