Diffstat (limited to 'drivers/net/ethernet')
329 files changed, 25393 insertions, 8080 deletions
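The most common small change in the series below is the conversion of dev_kfree_skb() to dev_consume_skb_any() on transmit paths (3c509, 3c59x, lib8390, bfin_mac, sun4i-emac, and others). dev_kfree_skb() must not be called from hard-IRQ context or with IRQs disabled, which ndo_start_xmit can hit under netpoll; dev_consume_skb_any() is safe from any context and records the skb as consumed rather than dropped, so drop-monitoring tools do not flag successfully transmitted packets. A minimal sketch of the pattern — the function name and the FIFO hand-off comment are illustrative, not taken from any one driver below:

	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	/* Illustrative only: hand the packet to hardware, then free the
	 * skb with the "consumed" flavor, which is safe in any context.
	 */
	static netdev_tx_t xmit_sketch(struct sk_buff *skb,
				       struct net_device *dev)
	{
		/* ... copy skb->data into the device Tx FIFO here ... */

		dev->stats.tx_packets++;
		dev->stats.tx_bytes += skb->len;

		dev_consume_skb_any(skb);	/* consumed, not dropped */
		return NETDEV_TX_OK;
	}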
diff --git a/drivers/net/ethernet/3com/3c509.c b/drivers/net/ethernet/3com/3c509.c index c53384d41c96..35df0b9e6848 100644 --- a/drivers/net/ethernet/3com/3c509.c +++ b/drivers/net/ethernet/3com/3c509.c @@ -749,7 +749,7 @@ el3_start_xmit(struct sk_buff *skb, struct net_device *dev) spin_unlock_irqrestore(&lp->lock, flags); - dev_kfree_skb (skb); + dev_consume_skb_any (skb); /* Clear the Tx status stack. */ { diff --git a/drivers/net/ethernet/3com/3c589_cs.c b/drivers/net/ethernet/3com/3c589_cs.c index 5992860a39c9..063557e037f2 100644 --- a/drivers/net/ethernet/3com/3c589_cs.c +++ b/drivers/net/ethernet/3com/3c589_cs.c @@ -1,23 +1,24 @@ -/*====================================================================== - - A PCMCIA ethernet driver for the 3com 3c589 card. - - Copyright (C) 1999 David A. Hinds -- dahinds@users.sourceforge.net - - 3c589_cs.c 1.162 2001/10/13 00:08:50 - - The network driver code is based on Donald Becker's 3c589 code: - - Written 1994 by Donald Becker. - Copyright 1993 United States Government as represented by the - Director, National Security Agency. This software may be used and - distributed according to the terms of the GNU General Public License, - incorporated herein by reference. - Donald Becker may be reached at becker@scyld.com - - Updated for 2.5.x by Alan Cox <alan@lxorguk.ukuu.org.uk> - -======================================================================*/ +/* ====================================================================== + * + * A PCMCIA ethernet driver for the 3com 3c589 card. + * + * Copyright (C) 1999 David A. Hinds -- dahinds@users.sourceforge.net + * + * 3c589_cs.c 1.162 2001/10/13 00:08:50 + * + * The network driver code is based on Donald Becker's 3c589 code: + * + * Written 1994 by Donald Becker. + * Copyright 1993 United States Government as represented by the + * Director, National Security Agency. This software may be used and + * distributed according to the terms of the GNU General Public License, + * incorporated herein by reference. + * Donald Becker may be reached at becker@scyld.com + * + * Updated for 2.5.x by Alan Cox <alan@lxorguk.ukuu.org.uk> + * + * ====================================================================== + */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt @@ -41,18 +42,20 @@ #include <linux/ioport.h> #include <linux/bitops.h> #include <linux/jiffies.h> +#include <linux/uaccess.h> +#include <linux/io.h> #include <pcmcia/cistpl.h> #include <pcmcia/cisreg.h> #include <pcmcia/ciscode.h> #include <pcmcia/ds.h> -#include <asm/uaccess.h> -#include <asm/io.h> /* To minimize the size of the driver source I only define operating - constants if they are used several times. You'll need the manual - if you want to understand driver details. */ + * constants if they are used several times. You'll need the manual + * if you want to understand driver details. + */ + /* Offsets from base I/O address. */ #define EL3_DATA 0x00 #define EL3_TIMER 0x0a @@ -65,7 +68,9 @@ #define EL3WINDOW(win_num) outw(SelectWindow + (win_num), ioaddr + EL3_CMD) /* The top five bits written to EL3_CMD are a command, the lower - 11 bits are the parameter, if applicable. */ + * 11 bits are the parameter, if applicable. 
+ */ + enum c509cmd { TotalReset = 0<<11, SelectWindow = 1<<11, @@ -190,138 +195,142 @@ static const struct net_device_ops el3_netdev_ops = { static int tc589_probe(struct pcmcia_device *link) { - struct el3_private *lp; - struct net_device *dev; + struct el3_private *lp; + struct net_device *dev; - dev_dbg(&link->dev, "3c589_attach()\n"); + dev_dbg(&link->dev, "3c589_attach()\n"); - /* Create new ethernet device */ - dev = alloc_etherdev(sizeof(struct el3_private)); - if (!dev) - return -ENOMEM; - lp = netdev_priv(dev); - link->priv = dev; - lp->p_dev = link; + /* Create new ethernet device */ + dev = alloc_etherdev(sizeof(struct el3_private)); + if (!dev) + return -ENOMEM; + lp = netdev_priv(dev); + link->priv = dev; + lp->p_dev = link; - spin_lock_init(&lp->lock); - link->resource[0]->end = 16; - link->resource[0]->flags |= IO_DATA_PATH_WIDTH_16; + spin_lock_init(&lp->lock); + link->resource[0]->end = 16; + link->resource[0]->flags |= IO_DATA_PATH_WIDTH_16; - link->config_flags |= CONF_ENABLE_IRQ; - link->config_index = 1; + link->config_flags |= CONF_ENABLE_IRQ; + link->config_index = 1; - dev->netdev_ops = &el3_netdev_ops; - dev->watchdog_timeo = TX_TIMEOUT; + dev->netdev_ops = &el3_netdev_ops; + dev->watchdog_timeo = TX_TIMEOUT; - SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops); + SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops); - return tc589_config(link); + return tc589_config(link); } static void tc589_detach(struct pcmcia_device *link) { - struct net_device *dev = link->priv; + struct net_device *dev = link->priv; - dev_dbg(&link->dev, "3c589_detach\n"); + dev_dbg(&link->dev, "3c589_detach\n"); - unregister_netdev(dev); + unregister_netdev(dev); - tc589_release(link); + tc589_release(link); - free_netdev(dev); + free_netdev(dev); } /* tc589_detach */ static int tc589_config(struct pcmcia_device *link) { - struct net_device *dev = link->priv; - __be16 *phys_addr; - int ret, i, j, multi = 0, fifo; - unsigned int ioaddr; - static const char * const ram_split[] = {"5:3", "3:1", "1:1", "3:5"}; - u8 *buf; - size_t len; - - dev_dbg(&link->dev, "3c589_config\n"); - - phys_addr = (__be16 *)dev->dev_addr; - /* Is this a 3c562? */ - if (link->manf_id != MANFID_3COM) - dev_info(&link->dev, "hmmm, is this really a 3Com card??\n"); - multi = (link->card_id == PRODID_3COM_3C562); - - link->io_lines = 16; - - /* For the 3c562, the base address must be xx00-xx7f */ - for (i = j = 0; j < 0x400; j += 0x10) { - if (multi && (j & 0x80)) continue; - link->resource[0]->start = j ^ 0x300; - i = pcmcia_request_io(link); - if (i == 0) - break; - } - if (i != 0) - goto failed; - - ret = pcmcia_request_irq(link, el3_interrupt); - if (ret) - goto failed; - - ret = pcmcia_enable_device(link); - if (ret) - goto failed; - - dev->irq = link->irq; - dev->base_addr = link->resource[0]->start; - ioaddr = dev->base_addr; - EL3WINDOW(0); - - /* The 3c589 has an extra EEPROM for configuration info, including - the hardware address. The 3c562 puts the address in the CIS. 
*/ - len = pcmcia_get_tuple(link, 0x88, &buf); - if (buf && len >= 6) { - for (i = 0; i < 3; i++) - phys_addr[i] = htons(le16_to_cpu(buf[i*2])); - kfree(buf); - } else { - kfree(buf); /* 0 < len < 6 */ - for (i = 0; i < 3; i++) - phys_addr[i] = htons(read_eeprom(ioaddr, i)); - if (phys_addr[0] == htons(0x6060)) { - dev_err(&link->dev, "IO port conflict at 0x%03lx-0x%03lx\n", - dev->base_addr, dev->base_addr+15); - goto failed; + struct net_device *dev = link->priv; + __be16 *phys_addr; + int ret, i, j, multi = 0, fifo; + unsigned int ioaddr; + static const char * const ram_split[] = {"5:3", "3:1", "1:1", "3:5"}; + u8 *buf; + size_t len; + + dev_dbg(&link->dev, "3c589_config\n"); + + phys_addr = (__be16 *)dev->dev_addr; + /* Is this a 3c562? */ + if (link->manf_id != MANFID_3COM) + dev_info(&link->dev, "hmmm, is this really a 3Com card??\n"); + multi = (link->card_id == PRODID_3COM_3C562); + + link->io_lines = 16; + + /* For the 3c562, the base address must be xx00-xx7f */ + for (i = j = 0; j < 0x400; j += 0x10) { + if (multi && (j & 0x80)) + continue; + link->resource[0]->start = j ^ 0x300; + i = pcmcia_request_io(link); + if (i == 0) + break; } - } - - /* The address and resource configuration register aren't loaded from - the EEPROM and *must* be set to 0 and IRQ3 for the PCMCIA version. */ - outw(0x3f00, ioaddr + 8); - fifo = inl(ioaddr); - - /* The if_port symbol can be set when the module is loaded */ - if ((if_port >= 0) && (if_port <= 3)) - dev->if_port = if_port; - else - dev_err(&link->dev, "invalid if_port requested\n"); - - SET_NETDEV_DEV(dev, &link->dev); - - if (register_netdev(dev) != 0) { - dev_err(&link->dev, "register_netdev() failed\n"); - goto failed; - } - - netdev_info(dev, "3Com 3c%s, io %#3lx, irq %d, hw_addr %pM\n", - (multi ? "562" : "589"), dev->base_addr, dev->irq, - dev->dev_addr); - netdev_info(dev, " %dK FIFO split %s Rx:Tx, %s xcvr\n", - (fifo & 7) ? 32 : 8, ram_split[(fifo >> 16) & 3], - if_names[dev->if_port]); - return 0; + if (i != 0) + goto failed; + + ret = pcmcia_request_irq(link, el3_interrupt); + if (ret) + goto failed; + + ret = pcmcia_enable_device(link); + if (ret) + goto failed; + + dev->irq = link->irq; + dev->base_addr = link->resource[0]->start; + ioaddr = dev->base_addr; + EL3WINDOW(0); + + /* The 3c589 has an extra EEPROM for configuration info, including + * the hardware address. The 3c562 puts the address in the CIS. + */ + len = pcmcia_get_tuple(link, 0x88, &buf); + if (buf && len >= 6) { + for (i = 0; i < 3; i++) + phys_addr[i] = htons(le16_to_cpu(buf[i*2])); + kfree(buf); + } else { + kfree(buf); /* 0 < len < 6 */ + for (i = 0; i < 3; i++) + phys_addr[i] = htons(read_eeprom(ioaddr, i)); + if (phys_addr[0] == htons(0x6060)) { + dev_err(&link->dev, "IO port conflict at 0x%03lx-0x%03lx\n", + dev->base_addr, dev->base_addr+15); + goto failed; + } + } + + /* The address and resource configuration register aren't loaded from + * the EEPROM and *must* be set to 0 and IRQ3 for the PCMCIA version. + */ + + outw(0x3f00, ioaddr + 8); + fifo = inl(ioaddr); + + /* The if_port symbol can be set when the module is loaded */ + if ((if_port >= 0) && (if_port <= 3)) + dev->if_port = if_port; + else + dev_err(&link->dev, "invalid if_port requested\n"); + + SET_NETDEV_DEV(dev, &link->dev); + + if (register_netdev(dev) != 0) { + dev_err(&link->dev, "register_netdev() failed\n"); + goto failed; + } + + netdev_info(dev, "3Com 3c%s, io %#3lx, irq %d, hw_addr %pM\n", + (multi ? 
"562" : "589"), dev->base_addr, dev->irq, + dev->dev_addr); + netdev_info(dev, " %dK FIFO split %s Rx:Tx, %s xcvr\n", + (fifo & 7) ? 32 : 8, ram_split[(fifo >> 16) & 3], + if_names[dev->if_port]); + return 0; failed: - tc589_release(link); - return -ENODEV; + tc589_release(link); + return -ENODEV; } /* tc589_config */ static void tc589_release(struct pcmcia_device *link) @@ -353,113 +362,120 @@ static int tc589_resume(struct pcmcia_device *link) /*====================================================================*/ -/* - Use this for commands that may take time to finish -*/ +/* Use this for commands that may take time to finish */ + static void tc589_wait_for_completion(struct net_device *dev, int cmd) { - int i = 100; - outw(cmd, dev->base_addr + EL3_CMD); - while (--i > 0) - if (!(inw(dev->base_addr + EL3_STATUS) & 0x1000)) break; - if (i == 0) - netdev_warn(dev, "command 0x%04x did not complete!\n", cmd); + int i = 100; + outw(cmd, dev->base_addr + EL3_CMD); + while (--i > 0) + if (!(inw(dev->base_addr + EL3_STATUS) & 0x1000)) + break; + if (i == 0) + netdev_warn(dev, "command 0x%04x did not complete!\n", cmd); } -/* - Read a word from the EEPROM using the regular EEPROM access register. - Assume that we are in register window zero. -*/ +/* Read a word from the EEPROM using the regular EEPROM access register. + * Assume that we are in register window zero. + */ + static u16 read_eeprom(unsigned int ioaddr, int index) { - int i; - outw(EEPROM_READ + index, ioaddr + 10); - /* Reading the eeprom takes 162 us */ - for (i = 1620; i >= 0; i--) - if ((inw(ioaddr + 10) & EEPROM_BUSY) == 0) - break; - return inw(ioaddr + 12); + int i; + outw(EEPROM_READ + index, ioaddr + 10); + /* Reading the eeprom takes 162 us */ + for (i = 1620; i >= 0; i--) + if ((inw(ioaddr + 10) & EEPROM_BUSY) == 0) + break; + return inw(ioaddr + 12); } -/* - Set transceiver type, perhaps to something other than what the user - specified in dev->if_port. -*/ +/* Set transceiver type, perhaps to something other than what the user + * specified in dev->if_port. + */ + static void tc589_set_xcvr(struct net_device *dev, int if_port) { - struct el3_private *lp = netdev_priv(dev); - unsigned int ioaddr = dev->base_addr; - - EL3WINDOW(0); - switch (if_port) { - case 0: case 1: outw(0, ioaddr + 6); break; - case 2: outw(3<<14, ioaddr + 6); break; - case 3: outw(1<<14, ioaddr + 6); break; - } - /* On PCMCIA, this just turns on the LED */ - outw((if_port == 2) ? StartCoax : StopCoax, ioaddr + EL3_CMD); - /* 10baseT interface, enable link beat and jabber check. */ - EL3WINDOW(4); - outw(MEDIA_LED | ((if_port < 2) ? MEDIA_TP : 0), ioaddr + WN4_MEDIA); - EL3WINDOW(1); - if (if_port == 2) - lp->media_status = ((dev->if_port == 0) ? 0x8000 : 0x4000); - else - lp->media_status = ((dev->if_port == 0) ? 0x4010 : 0x8800); + struct el3_private *lp = netdev_priv(dev); + unsigned int ioaddr = dev->base_addr; + + EL3WINDOW(0); + switch (if_port) { + case 0: + case 1: + outw(0, ioaddr + 6); + break; + case 2: + outw(3<<14, ioaddr + 6); + break; + case 3: + outw(1<<14, ioaddr + 6); + break; + } + /* On PCMCIA, this just turns on the LED */ + outw((if_port == 2) ? StartCoax : StopCoax, ioaddr + EL3_CMD); + /* 10baseT interface, enable link beat and jabber check. */ + EL3WINDOW(4); + outw(MEDIA_LED | ((if_port < 2) ? MEDIA_TP : 0), ioaddr + WN4_MEDIA); + EL3WINDOW(1); + if (if_port == 2) + lp->media_status = ((dev->if_port == 0) ? 0x8000 : 0x4000); + else + lp->media_status = ((dev->if_port == 0) ? 
0x4010 : 0x8800); } static void dump_status(struct net_device *dev) { - unsigned int ioaddr = dev->base_addr; - EL3WINDOW(1); - netdev_info(dev, " irq status %04x, rx status %04x, tx status %02x tx free %04x\n", - inw(ioaddr+EL3_STATUS), inw(ioaddr+RX_STATUS), - inb(ioaddr+TX_STATUS), inw(ioaddr+TX_FREE)); - EL3WINDOW(4); - netdev_info(dev, " diagnostics: fifo %04x net %04x ethernet %04x media %04x\n", - inw(ioaddr+0x04), inw(ioaddr+0x06), inw(ioaddr+0x08), - inw(ioaddr+0x0a)); - EL3WINDOW(1); + unsigned int ioaddr = dev->base_addr; + EL3WINDOW(1); + netdev_info(dev, " irq status %04x, rx status %04x, tx status %02x tx free %04x\n", + inw(ioaddr+EL3_STATUS), inw(ioaddr+RX_STATUS), + inb(ioaddr+TX_STATUS), inw(ioaddr+TX_FREE)); + EL3WINDOW(4); + netdev_info(dev, " diagnostics: fifo %04x net %04x ethernet %04x media %04x\n", + inw(ioaddr+0x04), inw(ioaddr+0x06), inw(ioaddr+0x08), + inw(ioaddr+0x0a)); + EL3WINDOW(1); } /* Reset and restore all of the 3c589 registers. */ static void tc589_reset(struct net_device *dev) { - unsigned int ioaddr = dev->base_addr; - int i; - - EL3WINDOW(0); - outw(0x0001, ioaddr + 4); /* Activate board. */ - outw(0x3f00, ioaddr + 8); /* Set the IRQ line. */ - - /* Set the station address in window 2. */ - EL3WINDOW(2); - for (i = 0; i < 6; i++) - outb(dev->dev_addr[i], ioaddr + i); - - tc589_set_xcvr(dev, dev->if_port); - - /* Switch to the stats window, and clear all stats by reading. */ - outw(StatsDisable, ioaddr + EL3_CMD); - EL3WINDOW(6); - for (i = 0; i < 9; i++) - inb(ioaddr+i); - inw(ioaddr + 10); - inw(ioaddr + 12); - - /* Switch to register set 1 for normal use. */ - EL3WINDOW(1); - - set_rx_mode(dev); - outw(StatsEnable, ioaddr + EL3_CMD); /* Turn on statistics. */ - outw(RxEnable, ioaddr + EL3_CMD); /* Enable the receiver. */ - outw(TxEnable, ioaddr + EL3_CMD); /* Enable transmitter. */ - /* Allow status bits to be seen. */ - outw(SetStatusEnb | 0xff, ioaddr + EL3_CMD); - /* Ack all pending events, and set active indicator mask. */ - outw(AckIntr | IntLatch | TxAvailable | RxEarly | IntReq, + unsigned int ioaddr = dev->base_addr; + int i; + + EL3WINDOW(0); + outw(0x0001, ioaddr + 4); /* Activate board. */ + outw(0x3f00, ioaddr + 8); /* Set the IRQ line. */ + + /* Set the station address in window 2. */ + EL3WINDOW(2); + for (i = 0; i < 6; i++) + outb(dev->dev_addr[i], ioaddr + i); + + tc589_set_xcvr(dev, dev->if_port); + + /* Switch to the stats window, and clear all stats by reading. */ + outw(StatsDisable, ioaddr + EL3_CMD); + EL3WINDOW(6); + for (i = 0; i < 9; i++) + inb(ioaddr+i); + inw(ioaddr + 10); + inw(ioaddr + 12); + + /* Switch to register set 1 for normal use. */ + EL3WINDOW(1); + + set_rx_mode(dev); + outw(StatsEnable, ioaddr + EL3_CMD); /* Turn on statistics. */ + outw(RxEnable, ioaddr + EL3_CMD); /* Enable the receiver. */ + outw(TxEnable, ioaddr + EL3_CMD); /* Enable transmitter. */ + /* Allow status bits to be seen. */ + outw(SetStatusEnb | 0xff, ioaddr + EL3_CMD); + /* Ack all pending events, and set active indicator mask. 
*/ + outw(AckIntr | IntLatch | TxAvailable | RxEarly | IntReq, ioaddr + EL3_CMD); - outw(SetIntrEnb | IntLatch | TxAvailable | RxComplete | StatsFull + outw(SetIntrEnb | IntLatch | TxAvailable | RxComplete | StatsFull | AdapterFailure, ioaddr + EL3_CMD); } @@ -478,381 +494,406 @@ static const struct ethtool_ops netdev_ethtool_ops = { static int el3_config(struct net_device *dev, struct ifmap *map) { - if ((map->port != (u_char)(-1)) && (map->port != dev->if_port)) { - if (map->port <= 3) { - dev->if_port = map->port; - netdev_info(dev, "switched to %s port\n", if_names[dev->if_port]); - tc589_set_xcvr(dev, dev->if_port); - } else - return -EINVAL; - } - return 0; + if ((map->port != (u_char)(-1)) && (map->port != dev->if_port)) { + if (map->port <= 3) { + dev->if_port = map->port; + netdev_info(dev, "switched to %s port\n", if_names[dev->if_port]); + tc589_set_xcvr(dev, dev->if_port); + } else { + return -EINVAL; + } + } + return 0; } static int el3_open(struct net_device *dev) { - struct el3_private *lp = netdev_priv(dev); - struct pcmcia_device *link = lp->p_dev; + struct el3_private *lp = netdev_priv(dev); + struct pcmcia_device *link = lp->p_dev; - if (!pcmcia_dev_present(link)) - return -ENODEV; + if (!pcmcia_dev_present(link)) + return -ENODEV; - link->open++; - netif_start_queue(dev); + link->open++; + netif_start_queue(dev); - tc589_reset(dev); - init_timer(&lp->media); - lp->media.function = media_check; - lp->media.data = (unsigned long) dev; - lp->media.expires = jiffies + HZ; - add_timer(&lp->media); + tc589_reset(dev); + init_timer(&lp->media); + lp->media.function = media_check; + lp->media.data = (unsigned long) dev; + lp->media.expires = jiffies + HZ; + add_timer(&lp->media); - dev_dbg(&link->dev, "%s: opened, status %4.4x.\n", + dev_dbg(&link->dev, "%s: opened, status %4.4x.\n", dev->name, inw(dev->base_addr + EL3_STATUS)); - return 0; + return 0; } static void el3_tx_timeout(struct net_device *dev) { - unsigned int ioaddr = dev->base_addr; - - netdev_warn(dev, "Transmit timed out!\n"); - dump_status(dev); - dev->stats.tx_errors++; - dev->trans_start = jiffies; /* prevent tx timeout */ - /* Issue TX_RESET and TX_START commands. */ - tc589_wait_for_completion(dev, TxReset); - outw(TxEnable, ioaddr + EL3_CMD); - netif_wake_queue(dev); + unsigned int ioaddr = dev->base_addr; + + netdev_warn(dev, "Transmit timed out!\n"); + dump_status(dev); + dev->stats.tx_errors++; + dev->trans_start = jiffies; /* prevent tx timeout */ + /* Issue TX_RESET and TX_START commands. */ + tc589_wait_for_completion(dev, TxReset); + outw(TxEnable, ioaddr + EL3_CMD); + netif_wake_queue(dev); } static void pop_tx_status(struct net_device *dev) { - unsigned int ioaddr = dev->base_addr; - int i; - - /* Clear the Tx status stack. */ - for (i = 32; i > 0; i--) { - u_char tx_status = inb(ioaddr + TX_STATUS); - if (!(tx_status & 0x84)) break; - /* reset transmitter on jabber error or underrun */ - if (tx_status & 0x30) - tc589_wait_for_completion(dev, TxReset); - if (tx_status & 0x38) { - netdev_dbg(dev, "transmit error: status 0x%02x\n", tx_status); - outw(TxEnable, ioaddr + EL3_CMD); - dev->stats.tx_aborted_errors++; + unsigned int ioaddr = dev->base_addr; + int i; + + /* Clear the Tx status stack. 
*/ + for (i = 32; i > 0; i--) { + u_char tx_status = inb(ioaddr + TX_STATUS); + if (!(tx_status & 0x84)) + break; + /* reset transmitter on jabber error or underrun */ + if (tx_status & 0x30) + tc589_wait_for_completion(dev, TxReset); + if (tx_status & 0x38) { + netdev_dbg(dev, "transmit error: status 0x%02x\n", tx_status); + outw(TxEnable, ioaddr + EL3_CMD); + dev->stats.tx_aborted_errors++; + } + outb(0x00, ioaddr + TX_STATUS); /* Pop the status stack. */ } - outb(0x00, ioaddr + TX_STATUS); /* Pop the status stack. */ - } } static netdev_tx_t el3_start_xmit(struct sk_buff *skb, struct net_device *dev) { - unsigned int ioaddr = dev->base_addr; - struct el3_private *priv = netdev_priv(dev); - unsigned long flags; + unsigned int ioaddr = dev->base_addr; + struct el3_private *priv = netdev_priv(dev); + unsigned long flags; - netdev_dbg(dev, "el3_start_xmit(length = %ld) called, status %4.4x.\n", + netdev_dbg(dev, "el3_start_xmit(length = %ld) called, status %4.4x.\n", (long)skb->len, inw(ioaddr + EL3_STATUS)); - spin_lock_irqsave(&priv->lock, flags); + spin_lock_irqsave(&priv->lock, flags); - dev->stats.tx_bytes += skb->len; + dev->stats.tx_bytes += skb->len; - /* Put out the doubleword header... */ - outw(skb->len, ioaddr + TX_FIFO); - outw(0x00, ioaddr + TX_FIFO); - /* ... and the packet rounded to a doubleword. */ - outsl(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2); + /* Put out the doubleword header... */ + outw(skb->len, ioaddr + TX_FIFO); + outw(0x00, ioaddr + TX_FIFO); + /* ... and the packet rounded to a doubleword. */ + outsl(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2); - if (inw(ioaddr + TX_FREE) <= 1536) { - netif_stop_queue(dev); - /* Interrupt us when the FIFO has room for max-sized packet. */ - outw(SetTxThreshold + 1536, ioaddr + EL3_CMD); - } + if (inw(ioaddr + TX_FREE) <= 1536) { + netif_stop_queue(dev); + /* Interrupt us when the FIFO has room for max-sized packet. */ + outw(SetTxThreshold + 1536, ioaddr + EL3_CMD); + } - pop_tx_status(dev); - spin_unlock_irqrestore(&priv->lock, flags); - dev_kfree_skb(skb); + pop_tx_status(dev); + spin_unlock_irqrestore(&priv->lock, flags); + dev_kfree_skb(skb); - return NETDEV_TX_OK; + return NETDEV_TX_OK; } /* The EL3 interrupt handler. */ static irqreturn_t el3_interrupt(int irq, void *dev_id) { - struct net_device *dev = (struct net_device *) dev_id; - struct el3_private *lp = netdev_priv(dev); - unsigned int ioaddr; - __u16 status; - int i = 0, handled = 1; + struct net_device *dev = (struct net_device *) dev_id; + struct el3_private *lp = netdev_priv(dev); + unsigned int ioaddr; + __u16 status; + int i = 0, handled = 1; - if (!netif_device_present(dev)) - return IRQ_NONE; + if (!netif_device_present(dev)) + return IRQ_NONE; - ioaddr = dev->base_addr; + ioaddr = dev->base_addr; - netdev_dbg(dev, "interrupt, status %4.4x.\n", inw(ioaddr + EL3_STATUS)); + netdev_dbg(dev, "interrupt, status %4.4x.\n", inw(ioaddr + EL3_STATUS)); - spin_lock(&lp->lock); - while ((status = inw(ioaddr + EL3_STATUS)) & + spin_lock(&lp->lock); + while ((status = inw(ioaddr + EL3_STATUS)) & (IntLatch | RxComplete | StatsFull)) { - if ((status & 0xe000) != 0x2000) { - netdev_dbg(dev, "interrupt from dead card\n"); - handled = 0; - break; - } - if (status & RxComplete) - el3_rx(dev); - if (status & TxAvailable) { - netdev_dbg(dev, " TX room bit was handled.\n"); - /* There's room in the FIFO for a full-sized packet. 
*/ - outw(AckIntr | TxAvailable, ioaddr + EL3_CMD); - netif_wake_queue(dev); - } - if (status & TxComplete) - pop_tx_status(dev); - if (status & (AdapterFailure | RxEarly | StatsFull)) { - /* Handle all uncommon interrupts. */ - if (status & StatsFull) /* Empty statistics. */ - update_stats(dev); - if (status & RxEarly) { /* Rx early is unused. */ - el3_rx(dev); - outw(AckIntr | RxEarly, ioaddr + EL3_CMD); - } - if (status & AdapterFailure) { - u16 fifo_diag; - EL3WINDOW(4); - fifo_diag = inw(ioaddr + 4); - EL3WINDOW(1); - netdev_warn(dev, "adapter failure, FIFO diagnostic register %04x.\n", + if ((status & 0xe000) != 0x2000) { + netdev_dbg(dev, "interrupt from dead card\n"); + handled = 0; + break; + } + if (status & RxComplete) + el3_rx(dev); + if (status & TxAvailable) { + netdev_dbg(dev, " TX room bit was handled.\n"); + /* There's room in the FIFO for a full-sized packet. */ + outw(AckIntr | TxAvailable, ioaddr + EL3_CMD); + netif_wake_queue(dev); + } + if (status & TxComplete) + pop_tx_status(dev); + if (status & (AdapterFailure | RxEarly | StatsFull)) { + /* Handle all uncommon interrupts. */ + if (status & StatsFull) /* Empty statistics. */ + update_stats(dev); + if (status & RxEarly) { + /* Rx early is unused. */ + el3_rx(dev); + outw(AckIntr | RxEarly, ioaddr + EL3_CMD); + } + if (status & AdapterFailure) { + u16 fifo_diag; + EL3WINDOW(4); + fifo_diag = inw(ioaddr + 4); + EL3WINDOW(1); + netdev_warn(dev, "adapter failure, FIFO diagnostic register %04x.\n", fifo_diag); - if (fifo_diag & 0x0400) { - /* Tx overrun */ - tc589_wait_for_completion(dev, TxReset); - outw(TxEnable, ioaddr + EL3_CMD); + if (fifo_diag & 0x0400) { + /* Tx overrun */ + tc589_wait_for_completion(dev, TxReset); + outw(TxEnable, ioaddr + EL3_CMD); + } + if (fifo_diag & 0x2000) { + /* Rx underrun */ + tc589_wait_for_completion(dev, RxReset); + set_rx_mode(dev); + outw(RxEnable, ioaddr + EL3_CMD); + } + outw(AckIntr | AdapterFailure, ioaddr + EL3_CMD); + } } - if (fifo_diag & 0x2000) { - /* Rx underrun */ - tc589_wait_for_completion(dev, RxReset); - set_rx_mode(dev); - outw(RxEnable, ioaddr + EL3_CMD); + if (++i > 10) { + netdev_err(dev, "infinite loop in interrupt, status %4.4x.\n", + status); + /* Clear all interrupts */ + outw(AckIntr | 0xFF, ioaddr + EL3_CMD); + break; } - outw(AckIntr | AdapterFailure, ioaddr + EL3_CMD); - } + /* Acknowledge the IRQ. */ + outw(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD); } - if (++i > 10) { - netdev_err(dev, "infinite loop in interrupt, status %4.4x.\n", - status); - /* Clear all interrupts */ - outw(AckIntr | 0xFF, ioaddr + EL3_CMD); - break; - } - /* Acknowledge the IRQ. 
*/ - outw(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD); - } - lp->last_irq = jiffies; - spin_unlock(&lp->lock); - netdev_dbg(dev, "exiting interrupt, status %4.4x.\n", - inw(ioaddr + EL3_STATUS)); - return IRQ_RETVAL(handled); + lp->last_irq = jiffies; + spin_unlock(&lp->lock); + netdev_dbg(dev, "exiting interrupt, status %4.4x.\n", + inw(ioaddr + EL3_STATUS)); + return IRQ_RETVAL(handled); } static void media_check(unsigned long arg) { - struct net_device *dev = (struct net_device *)(arg); - struct el3_private *lp = netdev_priv(dev); - unsigned int ioaddr = dev->base_addr; - u16 media, errs; - unsigned long flags; + struct net_device *dev = (struct net_device *)(arg); + struct el3_private *lp = netdev_priv(dev); + unsigned int ioaddr = dev->base_addr; + u16 media, errs; + unsigned long flags; - if (!netif_device_present(dev)) goto reschedule; + if (!netif_device_present(dev)) + goto reschedule; - /* Check for pending interrupt with expired latency timer: with - this, we can limp along even if the interrupt is blocked */ - if ((inw(ioaddr + EL3_STATUS) & IntLatch) && + /* Check for pending interrupt with expired latency timer: with + * this, we can limp along even if the interrupt is blocked + */ + if ((inw(ioaddr + EL3_STATUS) & IntLatch) && (inb(ioaddr + EL3_TIMER) == 0xff)) { - if (!lp->fast_poll) - netdev_warn(dev, "interrupt(s) dropped!\n"); - - local_irq_save(flags); - el3_interrupt(dev->irq, dev); - local_irq_restore(flags); - - lp->fast_poll = HZ; - } - if (lp->fast_poll) { - lp->fast_poll--; - lp->media.expires = jiffies + HZ/100; - add_timer(&lp->media); - return; - } - - /* lp->lock guards the EL3 window. Window should always be 1 except - when the lock is held */ - spin_lock_irqsave(&lp->lock, flags); - EL3WINDOW(4); - media = inw(ioaddr+WN4_MEDIA) & 0xc810; - - /* Ignore collisions unless we've had no irq's recently */ - if (time_before(jiffies, lp->last_irq + HZ)) { - media &= ~0x0010; - } else { - /* Try harder to detect carrier errors */ - EL3WINDOW(6); - outw(StatsDisable, ioaddr + EL3_CMD); - errs = inb(ioaddr + 0); - outw(StatsEnable, ioaddr + EL3_CMD); - dev->stats.tx_carrier_errors += errs; - if (errs || (lp->media_status & 0x0010)) media |= 0x0010; - } + if (!lp->fast_poll) + netdev_warn(dev, "interrupt(s) dropped!\n"); + + local_irq_save(flags); + el3_interrupt(dev->irq, dev); + local_irq_restore(flags); + + lp->fast_poll = HZ; + } + if (lp->fast_poll) { + lp->fast_poll--; + lp->media.expires = jiffies + HZ/100; + add_timer(&lp->media); + return; + } + + /* lp->lock guards the EL3 window. Window should always be 1 except + * when the lock is held + */ + + spin_lock_irqsave(&lp->lock, flags); + EL3WINDOW(4); + media = inw(ioaddr+WN4_MEDIA) & 0xc810; + + /* Ignore collisions unless we've had no irq's recently */ + if (time_before(jiffies, lp->last_irq + HZ)) { + media &= ~0x0010; + } else { + /* Try harder to detect carrier errors */ + EL3WINDOW(6); + outw(StatsDisable, ioaddr + EL3_CMD); + errs = inb(ioaddr + 0); + outw(StatsEnable, ioaddr + EL3_CMD); + dev->stats.tx_carrier_errors += errs; + if (errs || (lp->media_status & 0x0010)) + media |= 0x0010; + } - if (media != lp->media_status) { - if ((media & lp->media_status & 0x8000) && - ((lp->media_status ^ media) & 0x0800)) + if (media != lp->media_status) { + if ((media & lp->media_status & 0x8000) && + ((lp->media_status ^ media) & 0x0800)) netdev_info(dev, "%s link beat\n", - (lp->media_status & 0x0800 ? "lost" : "found")); - else if ((media & lp->media_status & 0x4000) && + (lp->media_status & 0x0800 ? 
"lost" : "found")); + else if ((media & lp->media_status & 0x4000) && ((lp->media_status ^ media) & 0x0010)) netdev_info(dev, "coax cable %s\n", - (lp->media_status & 0x0010 ? "ok" : "problem")); - if (dev->if_port == 0) { - if (media & 0x8000) { - if (media & 0x0800) - netdev_info(dev, "flipped to 10baseT\n"); - else + (lp->media_status & 0x0010 ? "ok" : "problem")); + if (dev->if_port == 0) { + if (media & 0x8000) { + if (media & 0x0800) + netdev_info(dev, "flipped to 10baseT\n"); + else tc589_set_xcvr(dev, 2); - } else if (media & 0x4000) { - if (media & 0x0010) - tc589_set_xcvr(dev, 1); - else - netdev_info(dev, "flipped to 10base2\n"); - } + } else if (media & 0x4000) { + if (media & 0x0010) + tc589_set_xcvr(dev, 1); + else + netdev_info(dev, "flipped to 10base2\n"); + } + } + lp->media_status = media; } - lp->media_status = media; - } - EL3WINDOW(1); - spin_unlock_irqrestore(&lp->lock, flags); + EL3WINDOW(1); + spin_unlock_irqrestore(&lp->lock, flags); reschedule: - lp->media.expires = jiffies + HZ; - add_timer(&lp->media); + lp->media.expires = jiffies + HZ; + add_timer(&lp->media); } static struct net_device_stats *el3_get_stats(struct net_device *dev) { - struct el3_private *lp = netdev_priv(dev); - unsigned long flags; - struct pcmcia_device *link = lp->p_dev; + struct el3_private *lp = netdev_priv(dev); + unsigned long flags; + struct pcmcia_device *link = lp->p_dev; - if (pcmcia_dev_present(link)) { - spin_lock_irqsave(&lp->lock, flags); - update_stats(dev); - spin_unlock_irqrestore(&lp->lock, flags); - } - return &dev->stats; + if (pcmcia_dev_present(link)) { + spin_lock_irqsave(&lp->lock, flags); + update_stats(dev); + spin_unlock_irqrestore(&lp->lock, flags); + } + return &dev->stats; } -/* - Update statistics. We change to register window 6, so this should be run - single-threaded if the device is active. This is expected to be a rare - operation, and it's simpler for the rest of the driver to assume that - window 1 is always valid rather than use a special window-state variable. - - Caller must hold the lock for this +/* Update statistics. We change to register window 6, so this should be run +* single-threaded if the device is active. This is expected to be a rare +* operation, and it's simpler for the rest of the driver to assume that +* window 1 is always valid rather than use a special window-state variable. +* +* Caller must hold the lock for this */ + static void update_stats(struct net_device *dev) { - unsigned int ioaddr = dev->base_addr; - - netdev_dbg(dev, "updating the statistics.\n"); - /* Turn off statistics updates while reading. */ - outw(StatsDisable, ioaddr + EL3_CMD); - /* Switch to the stats window, and read everything. */ - EL3WINDOW(6); - dev->stats.tx_carrier_errors += inb(ioaddr + 0); - dev->stats.tx_heartbeat_errors += inb(ioaddr + 1); - /* Multiple collisions. */ inb(ioaddr + 2); - dev->stats.collisions += inb(ioaddr + 3); - dev->stats.tx_window_errors += inb(ioaddr + 4); - dev->stats.rx_fifo_errors += inb(ioaddr + 5); - dev->stats.tx_packets += inb(ioaddr + 6); - /* Rx packets */ inb(ioaddr + 7); - /* Tx deferrals */ inb(ioaddr + 8); - /* Rx octets */ inw(ioaddr + 10); - /* Tx octets */ inw(ioaddr + 12); - - /* Back to window 1, and turn statistics back on. */ - EL3WINDOW(1); - outw(StatsEnable, ioaddr + EL3_CMD); + unsigned int ioaddr = dev->base_addr; + + netdev_dbg(dev, "updating the statistics.\n"); + /* Turn off statistics updates while reading. */ + outw(StatsDisable, ioaddr + EL3_CMD); + /* Switch to the stats window, and read everything. 
*/ + EL3WINDOW(6); + dev->stats.tx_carrier_errors += inb(ioaddr + 0); + dev->stats.tx_heartbeat_errors += inb(ioaddr + 1); + /* Multiple collisions. */ + inb(ioaddr + 2); + dev->stats.collisions += inb(ioaddr + 3); + dev->stats.tx_window_errors += inb(ioaddr + 4); + dev->stats.rx_fifo_errors += inb(ioaddr + 5); + dev->stats.tx_packets += inb(ioaddr + 6); + /* Rx packets */ + inb(ioaddr + 7); + /* Tx deferrals */ + inb(ioaddr + 8); + /* Rx octets */ + inw(ioaddr + 10); + /* Tx octets */ + inw(ioaddr + 12); + + /* Back to window 1, and turn statistics back on. */ + EL3WINDOW(1); + outw(StatsEnable, ioaddr + EL3_CMD); } static int el3_rx(struct net_device *dev) { - unsigned int ioaddr = dev->base_addr; - int worklimit = 32; - short rx_status; + unsigned int ioaddr = dev->base_addr; + int worklimit = 32; + short rx_status; - netdev_dbg(dev, "in rx_packet(), status %4.4x, rx_status %4.4x.\n", + netdev_dbg(dev, "in rx_packet(), status %4.4x, rx_status %4.4x.\n", inw(ioaddr+EL3_STATUS), inw(ioaddr+RX_STATUS)); - while (!((rx_status = inw(ioaddr + RX_STATUS)) & 0x8000) && + while (!((rx_status = inw(ioaddr + RX_STATUS)) & 0x8000) && worklimit > 0) { - worklimit--; - if (rx_status & 0x4000) { /* Error, update stats. */ - short error = rx_status & 0x3800; - dev->stats.rx_errors++; - switch (error) { - case 0x0000: dev->stats.rx_over_errors++; break; - case 0x0800: dev->stats.rx_length_errors++; break; - case 0x1000: dev->stats.rx_frame_errors++; break; - case 0x1800: dev->stats.rx_length_errors++; break; - case 0x2000: dev->stats.rx_frame_errors++; break; - case 0x2800: dev->stats.rx_crc_errors++; break; - } - } else { - short pkt_len = rx_status & 0x7ff; - struct sk_buff *skb; - - skb = netdev_alloc_skb(dev, pkt_len + 5); - - netdev_dbg(dev, " Receiving packet size %d status %4.4x.\n", + worklimit--; + if (rx_status & 0x4000) { /* Error, update stats. 
*/ + short error = rx_status & 0x3800; + dev->stats.rx_errors++; + switch (error) { + case 0x0000: + dev->stats.rx_over_errors++; + break; + case 0x0800: + dev->stats.rx_length_errors++; + break; + case 0x1000: + dev->stats.rx_frame_errors++; + break; + case 0x1800: + dev->stats.rx_length_errors++; + break; + case 0x2000: + dev->stats.rx_frame_errors++; + break; + case 0x2800: + dev->stats.rx_crc_errors++; + break; + } + } else { + short pkt_len = rx_status & 0x7ff; + struct sk_buff *skb; + + skb = netdev_alloc_skb(dev, pkt_len + 5); + + netdev_dbg(dev, " Receiving packet size %d status %4.4x.\n", pkt_len, rx_status); - if (skb != NULL) { - skb_reserve(skb, 2); - insl(ioaddr+RX_FIFO, skb_put(skb, pkt_len), + if (skb != NULL) { + skb_reserve(skb, 2); + insl(ioaddr+RX_FIFO, skb_put(skb, pkt_len), (pkt_len+3)>>2); - skb->protocol = eth_type_trans(skb, dev); - netif_rx(skb); - dev->stats.rx_packets++; - dev->stats.rx_bytes += pkt_len; - } else { - netdev_dbg(dev, "couldn't allocate a sk_buff of size %d.\n", + skb->protocol = eth_type_trans(skb, dev); + netif_rx(skb); + dev->stats.rx_packets++; + dev->stats.rx_bytes += pkt_len; + } else { + netdev_dbg(dev, "couldn't allocate a sk_buff of size %d.\n", pkt_len); - dev->stats.rx_dropped++; - } + dev->stats.rx_dropped++; + } + } + /* Pop the top of the Rx FIFO */ + tc589_wait_for_completion(dev, RxDiscard); } - /* Pop the top of the Rx FIFO */ - tc589_wait_for_completion(dev, RxDiscard); - } - if (worklimit == 0) - netdev_warn(dev, "too much work in el3_rx!\n"); - return 0; + if (worklimit == 0) + netdev_warn(dev, "too much work in el3_rx!\n"); + return 0; } static void set_rx_mode(struct net_device *dev) { - unsigned int ioaddr = dev->base_addr; - u16 opts = SetRxFilter | RxStation | RxBroadcast; - - if (dev->flags & IFF_PROMISC) - opts |= RxMulticast | RxProm; - else if (!netdev_mc_empty(dev) || (dev->flags & IFF_ALLMULTI)) - opts |= RxMulticast; - outw(opts, ioaddr + EL3_CMD); + unsigned int ioaddr = dev->base_addr; + u16 opts = SetRxFilter | RxStation | RxBroadcast; + + if (dev->flags & IFF_PROMISC) + opts |= RxMulticast | RxProm; + else if (!netdev_mc_empty(dev) || (dev->flags & IFF_ALLMULTI)) + opts |= RxMulticast; + outw(opts, ioaddr + EL3_CMD); } static void set_multicast_list(struct net_device *dev) @@ -867,44 +908,44 @@ static void set_multicast_list(struct net_device *dev) static int el3_close(struct net_device *dev) { - struct el3_private *lp = netdev_priv(dev); - struct pcmcia_device *link = lp->p_dev; - unsigned int ioaddr = dev->base_addr; - - dev_dbg(&link->dev, "%s: shutting down ethercard.\n", dev->name); + struct el3_private *lp = netdev_priv(dev); + struct pcmcia_device *link = lp->p_dev; + unsigned int ioaddr = dev->base_addr; + + dev_dbg(&link->dev, "%s: shutting down ethercard.\n", dev->name); + + if (pcmcia_dev_present(link)) { + /* Turn off statistics ASAP. We update dev->stats below. */ + outw(StatsDisable, ioaddr + EL3_CMD); + + /* Disable the receiver and transmitter. */ + outw(RxDisable, ioaddr + EL3_CMD); + outw(TxDisable, ioaddr + EL3_CMD); + + if (dev->if_port == 2) + /* Turn off thinnet power. Green! */ + outw(StopCoax, ioaddr + EL3_CMD); + else if (dev->if_port == 1) { + /* Disable link beat and jabber */ + EL3WINDOW(4); + outw(0, ioaddr + WN4_MEDIA); + } - if (pcmcia_dev_present(link)) { - /* Turn off statistics ASAP. We update dev->stats below. */ - outw(StatsDisable, ioaddr + EL3_CMD); + /* Switching back to window 0 disables the IRQ. */ + EL3WINDOW(0); + /* But we explicitly zero the IRQ line select anyway. 
*/ + outw(0x0f00, ioaddr + WN0_IRQ); - /* Disable the receiver and transmitter. */ - outw(RxDisable, ioaddr + EL3_CMD); - outw(TxDisable, ioaddr + EL3_CMD); - - if (dev->if_port == 2) - /* Turn off thinnet power. Green! */ - outw(StopCoax, ioaddr + EL3_CMD); - else if (dev->if_port == 1) { - /* Disable link beat and jabber */ - EL3WINDOW(4); - outw(0, ioaddr + WN4_MEDIA); + /* Check if the card still exists */ + if ((inw(ioaddr+EL3_STATUS) & 0xe000) == 0x2000) + update_stats(dev); } - /* Switching back to window 0 disables the IRQ. */ - EL3WINDOW(0); - /* But we explicitly zero the IRQ line select anyway. */ - outw(0x0f00, ioaddr + WN0_IRQ); - - /* Check if the card still exists */ - if ((inw(ioaddr+EL3_STATUS) & 0xe000) == 0x2000) - update_stats(dev); - } - - link->open--; - netif_stop_queue(dev); - del_timer_sync(&lp->media); + link->open--; + netif_stop_queue(dev); + del_timer_sync(&lp->media); - return 0; + return 0; } static const struct pcmcia_device_id tc589_ids[] = { diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c index 238ccea965c8..61477b8e8d24 100644 --- a/drivers/net/ethernet/3com/3c59x.c +++ b/drivers/net/ethernet/3com/3c59x.c @@ -2086,7 +2086,7 @@ vortex_start_xmit(struct sk_buff *skb, struct net_device *dev) /* ... and the packet rounded to a doubleword. */ skb_tx_timestamp(skb); iowrite32_rep(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2); - dev_kfree_skb (skb); + dev_consume_skb_any (skb); if (ioread16(ioaddr + TxFree) > 1536) { netif_start_queue (dev); /* AKPM: redundant? */ } else { diff --git a/drivers/net/ethernet/8390/lib8390.c b/drivers/net/ethernet/8390/lib8390.c index d2cd80444ade..599311f0e05c 100644 --- a/drivers/net/ethernet/8390/lib8390.c +++ b/drivers/net/ethernet/8390/lib8390.c @@ -404,7 +404,7 @@ static netdev_tx_t __ei_start_xmit(struct sk_buff *skb, spin_unlock(&ei_local->page_lock); enable_irq_lockdep_irqrestore(dev->irq, &flags); skb_tx_timestamp(skb); - dev_kfree_skb(skb); + dev_consume_skb_any(skb); dev->stats.tx_bytes += send_length; return NETDEV_TX_OK; diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig index 506b0248c400..39b26fe28d10 100644 --- a/drivers/net/ethernet/Kconfig +++ b/drivers/net/ethernet/Kconfig @@ -22,6 +22,7 @@ source "drivers/net/ethernet/adaptec/Kconfig" source "drivers/net/ethernet/aeroflex/Kconfig" source "drivers/net/ethernet/allwinner/Kconfig" source "drivers/net/ethernet/alteon/Kconfig" +source "drivers/net/ethernet/altera/Kconfig" source "drivers/net/ethernet/amd/Kconfig" source "drivers/net/ethernet/apple/Kconfig" source "drivers/net/ethernet/arc/Kconfig" @@ -149,6 +150,7 @@ config S6GMAC To compile this driver as a module, choose M here. The module will be called s6gmac. 
+source "drivers/net/ethernet/samsung/Kconfig" source "drivers/net/ethernet/seeq/Kconfig" source "drivers/net/ethernet/silan/Kconfig" source "drivers/net/ethernet/sis/Kconfig" diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile index c0b8789952e7..545d0b3b9cb4 100644 --- a/drivers/net/ethernet/Makefile +++ b/drivers/net/ethernet/Makefile @@ -8,6 +8,7 @@ obj-$(CONFIG_NET_VENDOR_ADAPTEC) += adaptec/ obj-$(CONFIG_GRETH) += aeroflex/ obj-$(CONFIG_NET_VENDOR_ALLWINNER) += allwinner/ obj-$(CONFIG_NET_VENDOR_ALTEON) += alteon/ +obj-$(CONFIG_ALTERA_TSE) += altera/ obj-$(CONFIG_NET_VENDOR_AMD) += amd/ obj-$(CONFIG_NET_VENDOR_APPLE) += apple/ obj-$(CONFIG_NET_VENDOR_ARC) += arc/ @@ -60,6 +61,7 @@ obj-$(CONFIG_NET_VENDOR_REALTEK) += realtek/ obj-$(CONFIG_SH_ETH) += renesas/ obj-$(CONFIG_NET_VENDOR_RDC) += rdc/ obj-$(CONFIG_S6GMAC) += s6gmac.o +obj-$(CONFIG_NET_VENDOR_SAMSUNG) += samsung/ obj-$(CONFIG_NET_VENDOR_SEEQ) += seeq/ obj-$(CONFIG_NET_VENDOR_SILAN) += silan/ obj-$(CONFIG_NET_VENDOR_SIS) += sis/ diff --git a/drivers/net/ethernet/adi/bfin_mac.c b/drivers/net/ethernet/adi/bfin_mac.c index c0f68dcd1dc1..7ae74d450e8f 100644 --- a/drivers/net/ethernet/adi/bfin_mac.c +++ b/drivers/net/ethernet/adi/bfin_mac.c @@ -307,11 +307,6 @@ static int bfin_mdiobus_write(struct mii_bus *bus, int phy_addr, int regnum, return bfin_mdio_poll(); } -static int bfin_mdiobus_reset(struct mii_bus *bus) -{ - return 0; -} - static void bfin_mac_adjust_link(struct net_device *dev) { struct bfin_mac_local *lp = netdev_priv(dev); @@ -1040,6 +1035,7 @@ static struct ptp_clock_info bfin_ptp_caps = { .n_alarm = 0, .n_ext_ts = 0, .n_per_out = 0, + .n_pins = 0, .pps = 0, .adjfreq = bfin_ptp_adjfreq, .adjtime = bfin_ptp_adjtime, @@ -1086,7 +1082,7 @@ static inline void _tx_reclaim_skb(void) tx_list_head->desc_a.config &= ~DMAEN; tx_list_head->status.status_word = 0; if (tx_list_head->skb) { - dev_kfree_skb(tx_list_head->skb); + dev_consume_skb_any(tx_list_head->skb); tx_list_head->skb = NULL; } tx_list_head = tx_list_head->next; @@ -1823,7 +1819,6 @@ static int bfin_mii_bus_probe(struct platform_device *pdev) goto out_err_alloc; miibus->read = bfin_mdiobus_read; miibus->write = bfin_mdiobus_write; - miibus->reset = bfin_mdiobus_reset; miibus->parent = &pdev->dev; miibus->name = "bfin_mii_bus"; diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c index c5d75e7aeeb6..23578dfee249 100644 --- a/drivers/net/ethernet/aeroflex/greth.c +++ b/drivers/net/ethernet/aeroflex/greth.c @@ -1213,11 +1213,6 @@ static int greth_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val) return 0; } -static int greth_mdio_reset(struct mii_bus *bus) -{ - return 0; -} - static void greth_link_change(struct net_device *dev) { struct greth_private *greth = netdev_priv(dev); @@ -1332,7 +1327,6 @@ static int greth_mdio_init(struct greth_private *greth) snprintf(greth->mdio->id, MII_BUS_ID_SIZE, "%s-%d", greth->mdio->name, greth->irq); greth->mdio->read = greth_mdio_read; greth->mdio->write = greth_mdio_write; - greth->mdio->reset = greth_mdio_reset; greth->mdio->priv = greth; greth->mdio->irq = greth->mdio_irqs; diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c index 511f6eecd58b..fcaeeb8a4929 100644 --- a/drivers/net/ethernet/allwinner/sun4i-emac.c +++ b/drivers/net/ethernet/allwinner/sun4i-emac.c @@ -476,7 +476,7 @@ static int emac_start_xmit(struct sk_buff *skb, struct net_device *dev) spin_unlock_irqrestore(&db->lock, flags); /* free this 
SKB */ - dev_kfree_skb(skb); + dev_consume_skb_any(skb); return NETDEV_TX_OK; } diff --git a/drivers/net/ethernet/altera/Kconfig b/drivers/net/ethernet/altera/Kconfig new file mode 100644 index 000000000000..80c1ab74a4b8 --- /dev/null +++ b/drivers/net/ethernet/altera/Kconfig @@ -0,0 +1,8 @@ +config ALTERA_TSE + tristate "Altera Triple-Speed Ethernet MAC support" + select PHYLIB + ---help--- + This driver supports the Altera Triple-Speed (TSE) Ethernet MAC. + + To compile this driver as a module, choose M here. The module + will be called alteratse. diff --git a/drivers/net/ethernet/altera/Makefile b/drivers/net/ethernet/altera/Makefile new file mode 100644 index 000000000000..d4a187e45369 --- /dev/null +++ b/drivers/net/ethernet/altera/Makefile @@ -0,0 +1,7 @@ +# +# Makefile for the Altera device drivers. +# + +obj-$(CONFIG_ALTERA_TSE) += altera_tse.o +altera_tse-objs := altera_tse_main.o altera_tse_ethtool.o \ +altera_msgdma.o altera_sgdma.o altera_utils.o diff --git a/drivers/net/ethernet/altera/altera_msgdma.c b/drivers/net/ethernet/altera/altera_msgdma.c new file mode 100644 index 000000000000..3df18669ea30 --- /dev/null +++ b/drivers/net/ethernet/altera/altera_msgdma.c @@ -0,0 +1,202 @@ +/* Altera TSE SGDMA and MSGDMA Linux driver + * Copyright (C) 2014 Altera Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ + +#include <linux/netdevice.h> +#include "altera_utils.h" +#include "altera_tse.h" +#include "altera_msgdmahw.h" + +/* No initialization work to do for MSGDMA */ +int msgdma_initialize(struct altera_tse_private *priv) +{ + return 0; +} + +void msgdma_uninitialize(struct altera_tse_private *priv) +{ +} + +void msgdma_reset(struct altera_tse_private *priv) +{ + int counter; + struct msgdma_csr *txcsr = + (struct msgdma_csr *)priv->tx_dma_csr; + struct msgdma_csr *rxcsr = + (struct msgdma_csr *)priv->rx_dma_csr; + + /* Reset Rx mSGDMA */ + iowrite32(MSGDMA_CSR_STAT_MASK, &rxcsr->status); + iowrite32(MSGDMA_CSR_CTL_RESET, &rxcsr->control); + + counter = 0; + while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) { + if (tse_bit_is_clear(&rxcsr->status, + MSGDMA_CSR_STAT_RESETTING)) + break; + udelay(1); + } + + if (counter >= ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) + netif_warn(priv, drv, priv->dev, + "TSE Rx mSGDMA resetting bit never cleared!\n"); + + /* clear all status bits */ + iowrite32(MSGDMA_CSR_STAT_MASK, &rxcsr->status); + + /* Reset Tx mSGDMA */ + iowrite32(MSGDMA_CSR_STAT_MASK, &txcsr->status); + iowrite32(MSGDMA_CSR_CTL_RESET, &txcsr->control); + + counter = 0; + while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) { + if (tse_bit_is_clear(&txcsr->status, + MSGDMA_CSR_STAT_RESETTING)) + break; + udelay(1); + } + + if (counter >= ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) + netif_warn(priv, drv, priv->dev, + "TSE Tx mSGDMA resetting bit never cleared!\n"); + + /* clear all status bits */ + iowrite32(MSGDMA_CSR_STAT_MASK, &txcsr->status); +} + +void msgdma_disable_rxirq(struct altera_tse_private *priv) +{ + struct msgdma_csr *csr = priv->rx_dma_csr; + tse_clear_bit(&csr->control, MSGDMA_CSR_CTL_GLOBAL_INTR); +} + +void msgdma_enable_rxirq(struct altera_tse_private *priv) +{ + struct msgdma_csr *csr = priv->rx_dma_csr; + tse_set_bit(&csr->control, MSGDMA_CSR_CTL_GLOBAL_INTR); +} + +void msgdma_disable_txirq(struct altera_tse_private *priv) +{ + struct msgdma_csr *csr = priv->tx_dma_csr; + tse_clear_bit(&csr->control, MSGDMA_CSR_CTL_GLOBAL_INTR); +} + +void msgdma_enable_txirq(struct altera_tse_private *priv) +{ + struct msgdma_csr *csr = priv->tx_dma_csr; + tse_set_bit(&csr->control, MSGDMA_CSR_CTL_GLOBAL_INTR); +} + +void msgdma_clear_rxirq(struct altera_tse_private *priv) +{ + struct msgdma_csr *csr = priv->rx_dma_csr; + iowrite32(MSGDMA_CSR_STAT_IRQ, &csr->status); +} + +void msgdma_clear_txirq(struct altera_tse_private *priv) +{ + struct msgdma_csr *csr = priv->tx_dma_csr; + iowrite32(MSGDMA_CSR_STAT_IRQ, &csr->status); +} + +/* return 0 to indicate transmit is pending */ +int msgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer) +{ + struct msgdma_extended_desc *desc = priv->tx_dma_desc; + + iowrite32(lower_32_bits(buffer->dma_addr), &desc->read_addr_lo); + iowrite32(upper_32_bits(buffer->dma_addr), &desc->read_addr_hi); + iowrite32(0, &desc->write_addr_lo); + iowrite32(0, &desc->write_addr_hi); + iowrite32(buffer->len, &desc->len); + iowrite32(0, &desc->burst_seq_num); + iowrite32(MSGDMA_DESC_TX_STRIDE, &desc->stride); + iowrite32(MSGDMA_DESC_CTL_TX_SINGLE, &desc->control); + return 0; +} + +u32 msgdma_tx_completions(struct altera_tse_private *priv) +{ + u32 ready = 0; + u32 inuse; + u32 status; + struct msgdma_csr *txcsr = + (struct msgdma_csr *)priv->tx_dma_csr; + + /* Get number of sent descriptors */ + inuse = ioread32(&txcsr->rw_fill_level) & 0xffff; + + if (inuse) { /* Tx FIFO is not empty */ + ready = priv->tx_prod - priv->tx_cons - inuse - 1; + } else { 
+ /* Check for buffered last packet */ + status = ioread32(&txcsr->status); + if (status & MSGDMA_CSR_STAT_BUSY) + ready = priv->tx_prod - priv->tx_cons - 1; + else + ready = priv->tx_prod - priv->tx_cons; + } + return ready; +} + +/* Put buffer to the mSGDMA RX FIFO + */ +int msgdma_add_rx_desc(struct altera_tse_private *priv, + struct tse_buffer *rxbuffer) +{ + struct msgdma_extended_desc *desc = priv->rx_dma_desc; + u32 len = priv->rx_dma_buf_sz; + dma_addr_t dma_addr = rxbuffer->dma_addr; + u32 control = (MSGDMA_DESC_CTL_END_ON_EOP + | MSGDMA_DESC_CTL_END_ON_LEN + | MSGDMA_DESC_CTL_TR_COMP_IRQ + | MSGDMA_DESC_CTL_EARLY_IRQ + | MSGDMA_DESC_CTL_TR_ERR_IRQ + | MSGDMA_DESC_CTL_GO); + + iowrite32(0, &desc->read_addr_lo); + iowrite32(0, &desc->read_addr_hi); + iowrite32(lower_32_bits(dma_addr), &desc->write_addr_lo); + iowrite32(upper_32_bits(dma_addr), &desc->write_addr_hi); + iowrite32(len, &desc->len); + iowrite32(0, &desc->burst_seq_num); + iowrite32(0x00010001, &desc->stride); + iowrite32(control, &desc->control); + return 1; +} + +/* status is returned on upper 16 bits, + * length is returned in lower 16 bits + */ +u32 msgdma_rx_status(struct altera_tse_private *priv) +{ + u32 rxstatus = 0; + u32 pktlength; + u32 pktstatus; + struct msgdma_csr *rxcsr = + (struct msgdma_csr *)priv->rx_dma_csr; + struct msgdma_response *rxresp = + (struct msgdma_response *)priv->rx_dma_resp; + + if (ioread32(&rxcsr->resp_fill_level) & 0xffff) { + pktlength = ioread32(&rxresp->bytes_transferred); + pktstatus = ioread32(&rxresp->status); + rxstatus = pktstatus; + rxstatus = rxstatus << 16; + rxstatus |= (pktlength & 0xffff); + } + return rxstatus; +} diff --git a/drivers/net/ethernet/altera/altera_msgdma.h b/drivers/net/ethernet/altera/altera_msgdma.h new file mode 100644 index 000000000000..7f0f5bf2bba2 --- /dev/null +++ b/drivers/net/ethernet/altera/altera_msgdma.h @@ -0,0 +1,34 @@ +/* Altera TSE SGDMA and MSGDMA Linux driver + * Copyright (C) 2014 Altera Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ + +#ifndef __ALTERA_MSGDMA_H__ +#define __ALTERA_MSGDMA_H__ + +void msgdma_reset(struct altera_tse_private *); +void msgdma_enable_txirq(struct altera_tse_private *); +void msgdma_enable_rxirq(struct altera_tse_private *); +void msgdma_disable_rxirq(struct altera_tse_private *); +void msgdma_disable_txirq(struct altera_tse_private *); +void msgdma_clear_rxirq(struct altera_tse_private *); +void msgdma_clear_txirq(struct altera_tse_private *); +u32 msgdma_tx_completions(struct altera_tse_private *); +int msgdma_add_rx_desc(struct altera_tse_private *, struct tse_buffer *); +int msgdma_tx_buffer(struct altera_tse_private *, struct tse_buffer *); +u32 msgdma_rx_status(struct altera_tse_private *); +int msgdma_initialize(struct altera_tse_private *); +void msgdma_uninitialize(struct altera_tse_private *); + +#endif /* __ALTERA_MSGDMA_H__ */ diff --git a/drivers/net/ethernet/altera/altera_msgdmahw.h b/drivers/net/ethernet/altera/altera_msgdmahw.h new file mode 100644 index 000000000000..d7b59ba4019c --- /dev/null +++ b/drivers/net/ethernet/altera/altera_msgdmahw.h @@ -0,0 +1,167 @@ +/* Altera TSE SGDMA and MSGDMA Linux driver + * Copyright (C) 2014 Altera Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ + +#ifndef __ALTERA_MSGDMAHW_H__ +#define __ALTERA_MSGDMAHW_H__ + +/* mSGDMA standard descriptor format + */ +struct msgdma_desc { + u32 read_addr; /* data buffer source address */ + u32 write_addr; /* data buffer destination address */ + u32 len; /* the number of bytes to transfer per descriptor */ + u32 control; /* characteristics of the transfer */ +}; + +/* mSGDMA extended descriptor format + */ +struct msgdma_extended_desc { + u32 read_addr_lo; /* data buffer source address low bits */ + u32 write_addr_lo; /* data buffer destination address low bits */ + u32 len; /* the number of bytes to transfer + * per descriptor + */ + u32 burst_seq_num; /* bit 31:24 write burst + * bit 23:16 read burst + * bit 15:0 sequence number + */ + u32 stride; /* bit 31:16 write stride + * bit 15:0 read stride + */ + u32 read_addr_hi; /* data buffer source address high bits */ + u32 write_addr_hi; /* data buffer destination address high bits */ + u32 control; /* characteristics of the transfer */ +}; + +/* mSGDMA descriptor control field bit definitions + */ +#define MSGDMA_DESC_CTL_SET_CH(x) ((x) & 0xff) +#define MSGDMA_DESC_CTL_GEN_SOP BIT(8) +#define MSGDMA_DESC_CTL_GEN_EOP BIT(9) +#define MSGDMA_DESC_CTL_PARK_READS BIT(10) +#define MSGDMA_DESC_CTL_PARK_WRITES BIT(11) +#define MSGDMA_DESC_CTL_END_ON_EOP BIT(12) +#define MSGDMA_DESC_CTL_END_ON_LEN BIT(13) +#define MSGDMA_DESC_CTL_TR_COMP_IRQ BIT(14) +#define MSGDMA_DESC_CTL_EARLY_IRQ BIT(15) +#define MSGDMA_DESC_CTL_TR_ERR_IRQ (0xff << 16) +#define MSGDMA_DESC_CTL_EARLY_DONE BIT(24) +/* Writing ‘1’ to the ‘go’ bit commits the entire descriptor into the + * descriptor FIFO(s) + */ +#define MSGDMA_DESC_CTL_GO BIT(31) + +/* Tx buffer control flags + */ +#define MSGDMA_DESC_CTL_TX_FIRST (MSGDMA_DESC_CTL_GEN_SOP | \ + MSGDMA_DESC_CTL_TR_ERR_IRQ | \ + MSGDMA_DESC_CTL_GO) + +#define MSGDMA_DESC_CTL_TX_MIDDLE (MSGDMA_DESC_CTL_TR_ERR_IRQ | \ + MSGDMA_DESC_CTL_GO) + +#define MSGDMA_DESC_CTL_TX_LAST (MSGDMA_DESC_CTL_GEN_EOP | \ + MSGDMA_DESC_CTL_TR_COMP_IRQ | \ + MSGDMA_DESC_CTL_TR_ERR_IRQ | \ + MSGDMA_DESC_CTL_GO) + +#define MSGDMA_DESC_CTL_TX_SINGLE (MSGDMA_DESC_CTL_GEN_SOP | \ + MSGDMA_DESC_CTL_GEN_EOP | \ + MSGDMA_DESC_CTL_TR_COMP_IRQ | \ + MSGDMA_DESC_CTL_TR_ERR_IRQ | \ + MSGDMA_DESC_CTL_GO) + +#define MSGDMA_DESC_CTL_RX_SINGLE (MSGDMA_DESC_CTL_END_ON_EOP | \ + MSGDMA_DESC_CTL_END_ON_LEN | \ + MSGDMA_DESC_CTL_TR_COMP_IRQ | \ + MSGDMA_DESC_CTL_EARLY_IRQ | \ + MSGDMA_DESC_CTL_TR_ERR_IRQ | \ + MSGDMA_DESC_CTL_GO) + +/* mSGDMA extended descriptor stride definitions + */ +#define MSGDMA_DESC_TX_STRIDE (0x00010001) +#define MSGDMA_DESC_RX_STRIDE (0x00010001) + +/* mSGDMA dispatcher control and status register map + */ +struct msgdma_csr { + u32 status; /* Read/Clear */ + u32 control; /* Read/Write */ + u32 rw_fill_level; /* bit 31:16 - write fill level + * bit 15:0 - read fill level + */ + u32 resp_fill_level; /* bit 15:0 */ + u32 rw_seq_num; /* bit 31:16 - write sequence number + * bit 15:0 - read sequence number + */ + u32 pad[3]; /* reserved */ +}; + +/* mSGDMA CSR status register bit definitions + */ +#define MSGDMA_CSR_STAT_BUSY BIT(0) +#define MSGDMA_CSR_STAT_DESC_BUF_EMPTY BIT(1) +#define MSGDMA_CSR_STAT_DESC_BUF_FULL BIT(2) +#define MSGDMA_CSR_STAT_RESP_BUF_EMPTY BIT(3) +#define MSGDMA_CSR_STAT_RESP_BUF_FULL BIT(4) +#define MSGDMA_CSR_STAT_STOPPED BIT(5) +#define MSGDMA_CSR_STAT_RESETTING BIT(6) +#define MSGDMA_CSR_STAT_STOPPED_ON_ERR BIT(7) +#define MSGDMA_CSR_STAT_STOPPED_ON_EARLY BIT(8) +#define MSGDMA_CSR_STAT_IRQ BIT(9) +#define MSGDMA_CSR_STAT_MASK 
0x3FF
+#define MSGDMA_CSR_STAT_MASK_WITHOUT_IRQ	0x1FF
+
+#define MSGDMA_CSR_STAT_BUSY_GET(v)		GET_BIT_VALUE(v, 0)
+#define MSGDMA_CSR_STAT_DESC_BUF_EMPTY_GET(v)	GET_BIT_VALUE(v, 1)
+#define MSGDMA_CSR_STAT_DESC_BUF_FULL_GET(v)	GET_BIT_VALUE(v, 2)
+#define MSGDMA_CSR_STAT_RESP_BUF_EMPTY_GET(v)	GET_BIT_VALUE(v, 3)
+#define MSGDMA_CSR_STAT_RESP_BUF_FULL_GET(v)	GET_BIT_VALUE(v, 4)
+#define MSGDMA_CSR_STAT_STOPPED_GET(v)		GET_BIT_VALUE(v, 5)
+#define MSGDMA_CSR_STAT_RESETTING_GET(v)	GET_BIT_VALUE(v, 6)
+#define MSGDMA_CSR_STAT_STOPPED_ON_ERR_GET(v)	GET_BIT_VALUE(v, 7)
+#define MSGDMA_CSR_STAT_STOPPED_ON_EARLY_GET(v)	GET_BIT_VALUE(v, 8)
+#define MSGDMA_CSR_STAT_IRQ_GET(v)		GET_BIT_VALUE(v, 9)
+
+/* mSGDMA CSR control register bit definitions
+ */
+#define MSGDMA_CSR_CTL_STOP		BIT(0)
+#define MSGDMA_CSR_CTL_RESET		BIT(1)
+#define MSGDMA_CSR_CTL_STOP_ON_ERR	BIT(2)
+#define MSGDMA_CSR_CTL_STOP_ON_EARLY	BIT(3)
+#define MSGDMA_CSR_CTL_GLOBAL_INTR	BIT(4)
+#define MSGDMA_CSR_CTL_STOP_DESCS	BIT(5)
+
+/* mSGDMA CSR fill level bits
+ */
+#define MSGDMA_CSR_WR_FILL_LEVEL_GET(v)		(((v) & 0xffff0000) >> 16)
+#define MSGDMA_CSR_RD_FILL_LEVEL_GET(v)		((v) & 0x0000ffff)
+#define MSGDMA_CSR_RESP_FILL_LEVEL_GET(v)	((v) & 0x0000ffff)
+
+/* mSGDMA response register map
+ */
+struct msgdma_response {
+	u32 bytes_transferred;
+	u32 status;
+};
+
+/* mSGDMA response register bit definitions
+ */
+#define MSGDMA_RESP_EARLY_TERM	BIT(8)
+#define MSGDMA_RESP_ERR_MASK	0xFF
+
+#endif /* __ALTERA_MSGDMAHW_H__ */
diff --git a/drivers/net/ethernet/altera/altera_sgdma.c b/drivers/net/ethernet/altera/altera_sgdma.c
new file mode 100644
index 000000000000..0ee96639ae44
--- /dev/null
+++ b/drivers/net/ethernet/altera/altera_sgdma.c
@@ -0,0 +1,509 @@
+/* Altera TSE SGDMA and MSGDMA Linux driver
+ * Copyright (C) 2014 Altera Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */ + +#include <linux/list.h> +#include "altera_utils.h" +#include "altera_tse.h" +#include "altera_sgdmahw.h" +#include "altera_sgdma.h" + +static void sgdma_descrip(struct sgdma_descrip *desc, + struct sgdma_descrip *ndesc, + dma_addr_t ndesc_phys, + dma_addr_t raddr, + dma_addr_t waddr, + u16 length, + int generate_eop, + int rfixed, + int wfixed); + +static int sgdma_async_write(struct altera_tse_private *priv, + struct sgdma_descrip *desc); + +static int sgdma_async_read(struct altera_tse_private *priv); + +static dma_addr_t +sgdma_txphysaddr(struct altera_tse_private *priv, + struct sgdma_descrip *desc); + +static dma_addr_t +sgdma_rxphysaddr(struct altera_tse_private *priv, + struct sgdma_descrip *desc); + +static int sgdma_txbusy(struct altera_tse_private *priv); + +static int sgdma_rxbusy(struct altera_tse_private *priv); + +static void +queue_tx(struct altera_tse_private *priv, struct tse_buffer *buffer); + +static void +queue_rx(struct altera_tse_private *priv, struct tse_buffer *buffer); + +static struct tse_buffer * +dequeue_tx(struct altera_tse_private *priv); + +static struct tse_buffer * +dequeue_rx(struct altera_tse_private *priv); + +static struct tse_buffer * +queue_rx_peekhead(struct altera_tse_private *priv); + +int sgdma_initialize(struct altera_tse_private *priv) +{ + priv->txctrlreg = SGDMA_CTRLREG_ILASTD; + + priv->rxctrlreg = SGDMA_CTRLREG_IDESCRIP | + SGDMA_CTRLREG_ILASTD; + + INIT_LIST_HEAD(&priv->txlisthd); + INIT_LIST_HEAD(&priv->rxlisthd); + + priv->rxdescphys = (dma_addr_t) 0; + priv->txdescphys = (dma_addr_t) 0; + + priv->rxdescphys = dma_map_single(priv->device, priv->rx_dma_desc, + priv->rxdescmem, DMA_BIDIRECTIONAL); + + if (dma_mapping_error(priv->device, priv->rxdescphys)) { + sgdma_uninitialize(priv); + netdev_err(priv->dev, "error mapping rx descriptor memory\n"); + return -EINVAL; + } + + priv->txdescphys = dma_map_single(priv->device, priv->tx_dma_desc, + priv->txdescmem, DMA_TO_DEVICE); + + if (dma_mapping_error(priv->device, priv->txdescphys)) { + sgdma_uninitialize(priv); + netdev_err(priv->dev, "error mapping tx descriptor memory\n"); + return -EINVAL; + } + + return 0; +} + +void sgdma_uninitialize(struct altera_tse_private *priv) +{ + if (priv->rxdescphys) + dma_unmap_single(priv->device, priv->rxdescphys, + priv->rxdescmem, DMA_BIDIRECTIONAL); + + if (priv->txdescphys) + dma_unmap_single(priv->device, priv->txdescphys, + priv->txdescmem, DMA_TO_DEVICE); +} + +/* This function resets the SGDMA controller and clears the + * descriptor memory used for transmits and receives. 
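+ *
+ * In register terms, the reset handshake on each dispatcher amounts to
+ * this sketch (csr here stands for either the tx or rx CSR block):
+ *
+ *	iowrite32(SGDMA_CTRLREG_RESET, &csr->control);
+ *	iowrite32(0, &csr->control);
+ *
+ * The descriptor memory is cleared first so that no stale descriptor
+ * can still appear to be owned by hardware after the reset.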
+ */
+void sgdma_reset(struct altera_tse_private *priv)
+{
+	u32 *ptxdescripmem = (u32 *)priv->tx_dma_desc;
+	u32 txdescriplen = priv->txdescmem;
+	u32 *prxdescripmem = (u32 *)priv->rx_dma_desc;
+	u32 rxdescriplen = priv->rxdescmem;
+	struct sgdma_csr *ptxsgdma = (struct sgdma_csr *)priv->tx_dma_csr;
+	struct sgdma_csr *prxsgdma = (struct sgdma_csr *)priv->rx_dma_csr;
+
+	/* Initialize descriptor memory to 0 */
+	memset(ptxdescripmem, 0, txdescriplen);
+	memset(prxdescripmem, 0, rxdescriplen);
+
+	iowrite32(SGDMA_CTRLREG_RESET, &ptxsgdma->control);
+	iowrite32(0, &ptxsgdma->control);
+
+	iowrite32(SGDMA_CTRLREG_RESET, &prxsgdma->control);
+	iowrite32(0, &prxsgdma->control);
+}
+
+void sgdma_enable_rxirq(struct altera_tse_private *priv)
+{
+	struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr;
+	priv->rxctrlreg |= SGDMA_CTRLREG_INTEN;
+	tse_set_bit(&csr->control, SGDMA_CTRLREG_INTEN);
+}
+
+void sgdma_enable_txirq(struct altera_tse_private *priv)
+{
+	struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr;
+	priv->txctrlreg |= SGDMA_CTRLREG_INTEN;
+	tse_set_bit(&csr->control, SGDMA_CTRLREG_INTEN);
+}
+
+/* for the SGDMA, RX interrupts remain enabled once turned on, so
+ * disabling them is intentionally a no-op
+ */
+void sgdma_disable_rxirq(struct altera_tse_private *priv)
+{
+}
+
+/* for the SGDMA, TX interrupts remain enabled once turned on, so
+ * disabling them is intentionally a no-op
+ */
+void sgdma_disable_txirq(struct altera_tse_private *priv)
+{
+}
+
+void sgdma_clear_rxirq(struct altera_tse_private *priv)
+{
+	struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr;
+	tse_set_bit(&csr->control, SGDMA_CTRLREG_CLRINT);
+}
+
+void sgdma_clear_txirq(struct altera_tse_private *priv)
+{
+	struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr;
+	tse_set_bit(&csr->control, SGDMA_CTRLREG_CLRINT);
+}
+
+/* Transmits a buffer through the SGDMA. Returns the number of buffers
+ * transmitted, 0 if not possible.
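+ *
+ * Illustrative caller pattern (a sketch only; the real caller is the
+ * netdev transmit path, which dispatches through priv->dmaops):
+ *
+ *	spin_lock(&priv->tx_lock);
+ *	queued = priv->dmaops->tx_buffer(priv, buffer);
+ *	spin_unlock(&priv->tx_lock);
+ *	(queued == 0 means the tx sgdma was still busy; try again later)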
+ *
+ * tx_lock is held by the caller
+ */
+int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer)
+{
+	int pktstx = 0;
+	struct sgdma_descrip *descbase =
+		(struct sgdma_descrip *)priv->tx_dma_desc;
+
+	struct sgdma_descrip *cdesc = &descbase[0];
+	struct sgdma_descrip *ndesc = &descbase[1];
+
+	/* wait 'til the tx sgdma is ready for the next transmit request */
+	if (sgdma_txbusy(priv))
+		return 0;
+
+	sgdma_descrip(cdesc,			/* current descriptor */
+		      ndesc,			/* next descriptor */
+		      sgdma_txphysaddr(priv, ndesc),
+		      buffer->dma_addr,		/* address of packet to xmit */
+		      0,			/* write addr 0 for tx dma */
+		      buffer->len,		/* length of packet */
+		      SGDMA_CONTROL_EOP,	/* Generate EOP */
+		      0,			/* read fixed */
+		      SGDMA_CONTROL_WR_FIXED);	/* write fixed */
+
+	pktstx = sgdma_async_write(priv, cdesc);
+
+	/* enqueue the request to the pending transmit queue */
+	queue_tx(priv, buffer);
+
+	return 1;
+}
+
+
+/* tx_lock held to protect access to queued tx list
+ */
+u32 sgdma_tx_completions(struct altera_tse_private *priv)
+{
+	u32 ready = 0;
+	struct sgdma_descrip *desc = (struct sgdma_descrip *)priv->tx_dma_desc;
+
+	if (!sgdma_txbusy(priv) &&
+	    ((desc->control & SGDMA_CONTROL_HW_OWNED) == 0) &&
+	    (dequeue_tx(priv))) {
+		ready = 1;
+	}
+
+	return ready;
+}
+
+int sgdma_add_rx_desc(struct altera_tse_private *priv,
+		      struct tse_buffer *rxbuffer)
+{
+	queue_rx(priv, rxbuffer);
+	return sgdma_async_read(priv);
+}
+
+/* status is returned in the upper 16 bits,
+ * length is returned in the lower 16 bits
+ */
+u32 sgdma_rx_status(struct altera_tse_private *priv)
+{
+	struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr;
+	struct sgdma_descrip *base = (struct sgdma_descrip *)priv->rx_dma_desc;
+	struct sgdma_descrip *desc = NULL;
+	int pktsrx;
+	unsigned int rxstatus = 0;
+	unsigned int pktlength = 0;
+	unsigned int pktstatus = 0;
+	struct tse_buffer *rxbuffer = NULL;
+
+	dma_sync_single_for_cpu(priv->device,
+				priv->rxdescphys,
+				priv->rxdescmem,
+				DMA_BIDIRECTIONAL);
+
+	desc = &base[0];
+	if ((ioread32(&csr->status) & SGDMA_STSREG_EOP) ||
+	    (desc->status & SGDMA_STATUS_EOP)) {
+		pktlength = desc->bytes_xferred;
+		pktstatus = desc->status & 0x3f;
+		rxstatus = pktstatus;
+		rxstatus = rxstatus << 16;
+		rxstatus |= (pktlength & 0xffff);
+
+		desc->status = 0;
+
+		rxbuffer = dequeue_rx(priv);
+		if (rxbuffer == NULL)
+			netdev_err(priv->dev,
+				   "sgdma rx and rx queue empty!\n");
+
+		/* kick the rx sgdma after reaping this descriptor */
+		pktsrx = sgdma_async_read(priv);
+	}
+
+	return rxstatus;
+}
+
+
+/* Private functions */
+static void sgdma_descrip(struct sgdma_descrip *desc,
+			  struct sgdma_descrip *ndesc,
+			  dma_addr_t ndesc_phys,
+			  dma_addr_t raddr,
+			  dma_addr_t waddr,
+			  u16 length,
+			  int generate_eop,
+			  int rfixed,
+			  int wfixed)
+{
+	/* Clear the next descriptor as not owned by hardware */
+	u32 ctrl = ndesc->control;
+	ctrl &= ~SGDMA_CONTROL_HW_OWNED;
+	ndesc->control = ctrl;
+
+	ctrl = 0;
+	ctrl |= SGDMA_CONTROL_HW_OWNED;
+	ctrl |= generate_eop;
+	ctrl |= rfixed;
+	ctrl |= wfixed;
+
+	/* Channel is implicitly zero, initialized to 0 by default */
+
+	desc->raddr = raddr;
+	desc->waddr = waddr;
+	desc->next = lower_32_bits(ndesc_phys);
+	desc->control = ctrl;
+	desc->status = 0;
+	desc->rburst = 0;
+	desc->wburst = 0;
+	desc->bytes = length;
+	desc->bytes_xferred = 0;
+}
+
+/* If hardware is busy, don't restart the async read.
+ * If the status register is 0, meaning the initial state, restart the
+ * async read, probably for the first time when populating a receive
+ * buffer.
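+ *
+ * Roughly, the decision made below is (sketch, in terms of the
+ * SGDMA_STSREG_* bits defined in altera_sgdmahw.h):
+ *
+ *	sts = ioread32(&csr->status);
+ *	if (sts & SGDMA_STSREG_BUSY)
+ *		return 0;	(hardware busy, do not restart)
+ *	(otherwise program the descriptors and set SGDMA_CTRLREG_START)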
+ * If the read status indicates not busy and a completion status is
+ * present, restart the async DMA read.
+ */
+static int sgdma_async_read(struct altera_tse_private *priv)
+{
+	struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr;
+	struct sgdma_descrip *descbase =
+		(struct sgdma_descrip *)priv->rx_dma_desc;
+
+	struct sgdma_descrip *cdesc = &descbase[0];
+	struct sgdma_descrip *ndesc = &descbase[1];
+
+	unsigned int sts = ioread32(&csr->status);
+	struct tse_buffer *rxbuffer = NULL;
+
+	if (!sgdma_rxbusy(priv)) {
+		rxbuffer = queue_rx_peekhead(priv);
+		if (rxbuffer == NULL)
+			return 0;
+
+		sgdma_descrip(cdesc,		/* current descriptor */
+			      ndesc,		/* next descriptor */
+			      sgdma_rxphysaddr(priv, ndesc),
+			      0,		/* read addr 0 for rx dma */
+			      rxbuffer->dma_addr, /* write addr for rx dma */
+			      0,		/* read 'til EOP */
+			      0,		/* EOP: NA for rx dma */
+			      0,		/* read fixed: NA for rx dma */
+			      0);		/* SOP: NA for rx DMA */
+
+		/* clear control and status */
+		iowrite32(0, &csr->control);
+
+		/* If status available, clear those bits */
+		if (sts & 0xf)
+			iowrite32(0xf, &csr->status);
+
+		dma_sync_single_for_device(priv->device,
+					   priv->rxdescphys,
+					   priv->rxdescmem,
+					   DMA_BIDIRECTIONAL);
+
+		iowrite32(lower_32_bits(sgdma_rxphysaddr(priv, cdesc)),
+			  &csr->next_descrip);
+
+		iowrite32((priv->rxctrlreg | SGDMA_CTRLREG_START),
+			  &csr->control);
+
+		return 1;
+	}
+
+	return 0;
+}
+
+static int sgdma_async_write(struct altera_tse_private *priv,
+			     struct sgdma_descrip *desc)
+{
+	struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr;
+
+	if (sgdma_txbusy(priv))
+		return 0;
+
+	/* clear control and status */
+	iowrite32(0, &csr->control);
+	iowrite32(0x1f, &csr->status);
+
+	dma_sync_single_for_device(priv->device, priv->txdescphys,
+				   priv->txdescmem, DMA_TO_DEVICE);
+
+	iowrite32(lower_32_bits(sgdma_txphysaddr(priv, desc)),
+		  &csr->next_descrip);
+
+	iowrite32((priv->txctrlreg | SGDMA_CTRLREG_START),
+		  &csr->control);
+
+	return 1;
+}
+
+static dma_addr_t
+sgdma_txphysaddr(struct altera_tse_private *priv,
+		 struct sgdma_descrip *desc)
+{
+	dma_addr_t paddr = priv->txdescmem_busaddr;
+	uintptr_t offs = (uintptr_t)desc - (uintptr_t)priv->tx_dma_desc;
+	return (dma_addr_t)((uintptr_t)paddr + offs);
+}
+
+static dma_addr_t
+sgdma_rxphysaddr(struct altera_tse_private *priv,
+		 struct sgdma_descrip *desc)
+{
+	dma_addr_t paddr = priv->rxdescmem_busaddr;
+	uintptr_t offs = (uintptr_t)desc - (uintptr_t)priv->rx_dma_desc;
+	return (dma_addr_t)((uintptr_t)paddr + offs);
+}
+
+#define list_remove_head(list, entry, type, member)			\
+	do {								\
+		entry = NULL;						\
+		if (!list_empty(list)) {				\
+			entry = list_entry((list)->next, type, member);	\
+			list_del_init(&entry->member);			\
+		}							\
+	} while (0)
+
+#define list_peek_head(list, entry, type, member)			\
+	do {								\
+		entry = NULL;						\
+		if (!list_empty(list)) {				\
+			entry = list_entry((list)->next, type, member);	\
+		}							\
+	} while (0)
+
+/* adds a tse_buffer to the tail of the tx buffer list.
+ * assumes the caller is managing and holding a mutual exclusion
+ * primitive to avoid simultaneous pushes/pops to the list.
+ */
+static void
+queue_tx(struct altera_tse_private *priv, struct tse_buffer *buffer)
+{
+	list_add_tail(&buffer->lh, &priv->txlisthd);
+}
+
+
+/* adds a tse_buffer to the tail of the rx buffer list.
+ * assumes the caller is managing and holding a mutual exclusion
+ * primitive to avoid simultaneous pushes/pops to the list.
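+ *
+ * The list_remove_head()/list_peek_head() helpers above expand to
+ * plain list_entry() walks; peeking the rx list, for example, is
+ * roughly:
+ *
+ *	entry = NULL;
+ *	if (!list_empty(&priv->rxlisthd))
+ *		entry = list_entry(priv->rxlisthd.next,
+ *				   struct tse_buffer, lh);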
+ */
+static void
+queue_rx(struct altera_tse_private *priv, struct tse_buffer *buffer)
+{
+	list_add_tail(&buffer->lh, &priv->rxlisthd);
+}
+
+/* dequeues a tse_buffer from the transmit buffer list, otherwise
+ * returns NULL if empty.
+ * assumes the caller is managing and holding a mutual exclusion
+ * primitive to avoid simultaneous pushes/pops to the list.
+ */
+static struct tse_buffer *
+dequeue_tx(struct altera_tse_private *priv)
+{
+	struct tse_buffer *buffer = NULL;
+	list_remove_head(&priv->txlisthd, buffer, struct tse_buffer, lh);
+	return buffer;
+}
+
+/* dequeues a tse_buffer from the receive buffer list, otherwise
+ * returns NULL if empty.
+ * assumes the caller is managing and holding a mutual exclusion
+ * primitive to avoid simultaneous pushes/pops to the list.
+ */
+static struct tse_buffer *
+dequeue_rx(struct altera_tse_private *priv)
+{
+	struct tse_buffer *buffer = NULL;
+	list_remove_head(&priv->rxlisthd, buffer, struct tse_buffer, lh);
+	return buffer;
+}
+
+/* peeks at the head of the receive buffer list without removing it,
+ * otherwise returns NULL if empty.
+ * assumes the caller is managing and holding a mutual exclusion
+ * primitive to avoid simultaneous pushes/pops to the list while the
+ * head is being examined.
+ */
+static struct tse_buffer *
+queue_rx_peekhead(struct altera_tse_private *priv)
+{
+	struct tse_buffer *buffer = NULL;
+	list_peek_head(&priv->rxlisthd, buffer, struct tse_buffer, lh);
+	return buffer;
+}
+
+/* check and return rx sgdma status without polling
+ */
+static int sgdma_rxbusy(struct altera_tse_private *priv)
+{
+	struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr;
+	return ioread32(&csr->status) & SGDMA_STSREG_BUSY;
+}
+
+/* waits for the tx sgdma to finish its current operation, returns 0
+ * when it transitions to nonbusy, returns 1 if the operation times out
+ */
+static int sgdma_txbusy(struct altera_tse_private *priv)
+{
+	int delay = 0;
+	struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr;
+
+	/* if DMA is busy, wait for the current transaction to finish */
+	while ((ioread32(&csr->status) & SGDMA_STSREG_BUSY) && (delay++ < 100))
+		udelay(1);
+
+	if (ioread32(&csr->status) & SGDMA_STSREG_BUSY) {
+		netdev_err(priv->dev, "timeout waiting for tx dma\n");
+		return 1;
+	}
+	return 0;
+}
diff --git a/drivers/net/ethernet/altera/altera_sgdma.h b/drivers/net/ethernet/altera/altera_sgdma.h
new file mode 100644
index 000000000000..07d471729dc4
--- /dev/null
+++ b/drivers/net/ethernet/altera/altera_sgdma.h
@@ -0,0 +1,35 @@
+/* Altera TSE SGDMA and MSGDMA Linux driver
+ * Copyright (C) 2014 Altera Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ALTERA_SGDMA_H__
+#define __ALTERA_SGDMA_H__
+
+void sgdma_reset(struct altera_tse_private *);
+void sgdma_enable_txirq(struct altera_tse_private *);
+void sgdma_enable_rxirq(struct altera_tse_private *);
+void sgdma_disable_rxirq(struct altera_tse_private *);
+void sgdma_disable_txirq(struct altera_tse_private *);
+void sgdma_clear_rxirq(struct altera_tse_private *);
+void sgdma_clear_txirq(struct altera_tse_private *);
+int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *);
+u32 sgdma_tx_completions(struct altera_tse_private *);
+int sgdma_add_rx_desc(struct altera_tse_private *priv, struct tse_buffer *);
+u32 sgdma_rx_status(struct altera_tse_private *);
+int sgdma_initialize(struct altera_tse_private *);
+void sgdma_uninitialize(struct altera_tse_private *);
+
+#endif /* __ALTERA_SGDMA_H__ */
diff --git a/drivers/net/ethernet/altera/altera_sgdmahw.h b/drivers/net/ethernet/altera/altera_sgdmahw.h
new file mode 100644
index 000000000000..ba3334f35383
--- /dev/null
+++ b/drivers/net/ethernet/altera/altera_sgdmahw.h
@@ -0,0 +1,124 @@
+/* Altera TSE SGDMA and MSGDMA Linux driver
+ * Copyright (C) 2014 Altera Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */ + +#ifndef __ALTERA_SGDMAHW_H__ +#define __ALTERA_SGDMAHW_H__ + +/* SGDMA descriptor structure */ +struct sgdma_descrip { + unsigned int raddr; /* address of data to be read */ + unsigned int pad1; + unsigned int waddr; + unsigned int pad2; + unsigned int next; + unsigned int pad3; + unsigned short bytes; + unsigned char rburst; + unsigned char wburst; + unsigned short bytes_xferred; /* 16 bits, bytes xferred */ + + /* bit 0: error + * bit 1: length error + * bit 2: crc error + * bit 3: truncated error + * bit 4: phy error + * bit 5: collision error + * bit 6: reserved + * bit 7: status eop for recv case + */ + unsigned char status; + + /* bit 0: eop + * bit 1: read_fixed + * bit 2: write fixed + * bits 3,4,5,6: Channel (always 0) + * bit 7: hardware owned + */ + unsigned char control; +} __packed; + + +#define SGDMA_STATUS_ERR BIT(0) +#define SGDMA_STATUS_LENGTH_ERR BIT(1) +#define SGDMA_STATUS_CRC_ERR BIT(2) +#define SGDMA_STATUS_TRUNC_ERR BIT(3) +#define SGDMA_STATUS_PHY_ERR BIT(4) +#define SGDMA_STATUS_COLL_ERR BIT(5) +#define SGDMA_STATUS_EOP BIT(7) + +#define SGDMA_CONTROL_EOP BIT(0) +#define SGDMA_CONTROL_RD_FIXED BIT(1) +#define SGDMA_CONTROL_WR_FIXED BIT(2) + +/* Channel is always 0, so just zero initialize it */ + +#define SGDMA_CONTROL_HW_OWNED BIT(7) + +/* SGDMA register space */ +struct sgdma_csr { + /* bit 0: error + * bit 1: eop + * bit 2: descriptor completed + * bit 3: chain completed + * bit 4: busy + * remainder reserved + */ + u32 status; + u32 pad1[3]; + + /* bit 0: interrupt on error + * bit 1: interrupt on eop + * bit 2: interrupt after every descriptor + * bit 3: interrupt after last descrip in a chain + * bit 4: global interrupt enable + * bit 5: starts descriptor processing + * bit 6: stop core on dma error + * bit 7: interrupt on max descriptors + * bits 8-15: max descriptors to generate interrupt + * bit 16: Software reset + * bit 17: clears owned by hardware if 0, does not clear otherwise + * bit 18: enables descriptor polling mode + * bit 19-26: clocks before polling again + * bit 27-30: reserved + * bit 31: clear interrupt + */ + u32 control; + u32 pad2[3]; + u32 next_descrip; + u32 pad3[3]; +}; + + +#define SGDMA_STSREG_ERR BIT(0) /* Error */ +#define SGDMA_STSREG_EOP BIT(1) /* EOP */ +#define SGDMA_STSREG_DESCRIP BIT(2) /* Descriptor completed */ +#define SGDMA_STSREG_CHAIN BIT(3) /* Chain completed */ +#define SGDMA_STSREG_BUSY BIT(4) /* Controller busy */ + +#define SGDMA_CTRLREG_IOE BIT(0) /* Interrupt on error */ +#define SGDMA_CTRLREG_IOEOP BIT(1) /* Interrupt on EOP */ +#define SGDMA_CTRLREG_IDESCRIP BIT(2) /* Interrupt after every descriptor */ +#define SGDMA_CTRLREG_ILASTD BIT(3) /* Interrupt after last descriptor */ +#define SGDMA_CTRLREG_INTEN BIT(4) /* Global Interrupt enable */ +#define SGDMA_CTRLREG_START BIT(5) /* starts descriptor processing */ +#define SGDMA_CTRLREG_STOPERR BIT(6) /* stop on dma error */ +#define SGDMA_CTRLREG_INTMAX BIT(7) /* Interrupt on max descriptors */ +#define SGDMA_CTRLREG_RESET BIT(16)/* Software reset */ +#define SGDMA_CTRLREG_COBHW BIT(17)/* Clears owned by hardware */ +#define SGDMA_CTRLREG_POLL BIT(18)/* enables descriptor polling mode */ +#define SGDMA_CTRLREG_CLRINT BIT(31)/* Clears interrupt */ + +#endif /* __ALTERA_SGDMAHW_H__ */ diff --git a/drivers/net/ethernet/altera/altera_tse.h b/drivers/net/ethernet/altera/altera_tse.h new file mode 100644 index 000000000000..8feeed05de0e --- /dev/null +++ b/drivers/net/ethernet/altera/altera_tse.h @@ -0,0 +1,486 @@ +/* Altera Triple-Speed Ethernet MAC driver + * 
Copyright (C) 2008-2014 Altera Corporation. All rights reserved + * + * Contributors: + * Dalon Westergreen + * Thomas Chou + * Ian Abbott + * Yuriy Kozlov + * Tobias Klauser + * Andriy Smolskyy + * Roman Bulgakov + * Dmytro Mytarchuk + * Matthew Gerlach + * + * Original driver contributed by SLS. + * Major updates contributed by GlobalLogic + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#ifndef __ALTERA_TSE_H__ +#define __ALTERA_TSE_H__ + +#define ALTERA_TSE_RESOURCE_NAME "altera_tse" + +#include <linux/bitops.h> +#include <linux/if_vlan.h> +#include <linux/list.h> +#include <linux/netdevice.h> +#include <linux/phy.h> + +#define ALTERA_TSE_SW_RESET_WATCHDOG_CNTR 10000 +#define ALTERA_TSE_MAC_FIFO_WIDTH 4 /* TX/RX FIFO width in + * bytes + */ +/* Rx FIFO default settings */ +#define ALTERA_TSE_RX_SECTION_EMPTY 16 +#define ALTERA_TSE_RX_SECTION_FULL 0 +#define ALTERA_TSE_RX_ALMOST_EMPTY 8 +#define ALTERA_TSE_RX_ALMOST_FULL 8 + +/* Tx FIFO default settings */ +#define ALTERA_TSE_TX_SECTION_EMPTY 16 +#define ALTERA_TSE_TX_SECTION_FULL 0 +#define ALTERA_TSE_TX_ALMOST_EMPTY 8 +#define ALTERA_TSE_TX_ALMOST_FULL 3 + +/* MAC function configuration default settings */ +#define ALTERA_TSE_TX_IPG_LENGTH 12 + +#define GET_BIT_VALUE(v, bit) (((v) >> (bit)) & 0x1) + +/* MAC Command_Config Register Bit Definitions + */ +#define MAC_CMDCFG_TX_ENA BIT(0) +#define MAC_CMDCFG_RX_ENA BIT(1) +#define MAC_CMDCFG_XON_GEN BIT(2) +#define MAC_CMDCFG_ETH_SPEED BIT(3) +#define MAC_CMDCFG_PROMIS_EN BIT(4) +#define MAC_CMDCFG_PAD_EN BIT(5) +#define MAC_CMDCFG_CRC_FWD BIT(6) +#define MAC_CMDCFG_PAUSE_FWD BIT(7) +#define MAC_CMDCFG_PAUSE_IGNORE BIT(8) +#define MAC_CMDCFG_TX_ADDR_INS BIT(9) +#define MAC_CMDCFG_HD_ENA BIT(10) +#define MAC_CMDCFG_EXCESS_COL BIT(11) +#define MAC_CMDCFG_LATE_COL BIT(12) +#define MAC_CMDCFG_SW_RESET BIT(13) +#define MAC_CMDCFG_MHASH_SEL BIT(14) +#define MAC_CMDCFG_LOOP_ENA BIT(15) +#define MAC_CMDCFG_TX_ADDR_SEL(v) (((v) & 0x7) << 16) +#define MAC_CMDCFG_MAGIC_ENA BIT(19) +#define MAC_CMDCFG_SLEEP BIT(20) +#define MAC_CMDCFG_WAKEUP BIT(21) +#define MAC_CMDCFG_XOFF_GEN BIT(22) +#define MAC_CMDCFG_CNTL_FRM_ENA BIT(23) +#define MAC_CMDCFG_NO_LGTH_CHECK BIT(24) +#define MAC_CMDCFG_ENA_10 BIT(25) +#define MAC_CMDCFG_RX_ERR_DISC BIT(26) +#define MAC_CMDCFG_DISABLE_READ_TIMEOUT BIT(27) +#define MAC_CMDCFG_CNT_RESET BIT(31) + +#define MAC_CMDCFG_TX_ENA_GET(v) GET_BIT_VALUE(v, 0) +#define MAC_CMDCFG_RX_ENA_GET(v) GET_BIT_VALUE(v, 1) +#define MAC_CMDCFG_XON_GEN_GET(v) GET_BIT_VALUE(v, 2) +#define MAC_CMDCFG_ETH_SPEED_GET(v) GET_BIT_VALUE(v, 3) +#define MAC_CMDCFG_PROMIS_EN_GET(v) GET_BIT_VALUE(v, 4) +#define MAC_CMDCFG_PAD_EN_GET(v) GET_BIT_VALUE(v, 5) +#define MAC_CMDCFG_CRC_FWD_GET(v) GET_BIT_VALUE(v, 6) +#define MAC_CMDCFG_PAUSE_FWD_GET(v) GET_BIT_VALUE(v, 7) +#define MAC_CMDCFG_PAUSE_IGNORE_GET(v) GET_BIT_VALUE(v, 8) +#define MAC_CMDCFG_TX_ADDR_INS_GET(v) GET_BIT_VALUE(v, 9) +#define MAC_CMDCFG_HD_ENA_GET(v) GET_BIT_VALUE(v, 10) +#define 
MAC_CMDCFG_EXCESS_COL_GET(v) GET_BIT_VALUE(v, 11) +#define MAC_CMDCFG_LATE_COL_GET(v) GET_BIT_VALUE(v, 12) +#define MAC_CMDCFG_SW_RESET_GET(v) GET_BIT_VALUE(v, 13) +#define MAC_CMDCFG_MHASH_SEL_GET(v) GET_BIT_VALUE(v, 14) +#define MAC_CMDCFG_LOOP_ENA_GET(v) GET_BIT_VALUE(v, 15) +#define MAC_CMDCFG_TX_ADDR_SEL_GET(v) (((v) >> 16) & 0x7) +#define MAC_CMDCFG_MAGIC_ENA_GET(v) GET_BIT_VALUE(v, 19) +#define MAC_CMDCFG_SLEEP_GET(v) GET_BIT_VALUE(v, 20) +#define MAC_CMDCFG_WAKEUP_GET(v) GET_BIT_VALUE(v, 21) +#define MAC_CMDCFG_XOFF_GEN_GET(v) GET_BIT_VALUE(v, 22) +#define MAC_CMDCFG_CNTL_FRM_ENA_GET(v) GET_BIT_VALUE(v, 23) +#define MAC_CMDCFG_NO_LGTH_CHECK_GET(v) GET_BIT_VALUE(v, 24) +#define MAC_CMDCFG_ENA_10_GET(v) GET_BIT_VALUE(v, 25) +#define MAC_CMDCFG_RX_ERR_DISC_GET(v) GET_BIT_VALUE(v, 26) +#define MAC_CMDCFG_DISABLE_READ_TIMEOUT_GET(v) GET_BIT_VALUE(v, 27) +#define MAC_CMDCFG_CNT_RESET_GET(v) GET_BIT_VALUE(v, 31) + +/* MDIO registers within MAC register Space + */ +struct altera_tse_mdio { + u32 control; /* PHY device operation control register */ + u32 status; /* PHY device operation status register */ + u32 phy_id1; /* Bits 31:16 of PHY identifier */ + u32 phy_id2; /* Bits 15:0 of PHY identifier */ + u32 auto_negotiation_advertisement; /* Auto-negotiation + * advertisement + * register + */ + u32 remote_partner_base_page_ability; + + u32 reg6; + u32 reg7; + u32 reg8; + u32 reg9; + u32 rega; + u32 regb; + u32 regc; + u32 regd; + u32 rege; + u32 regf; + u32 reg10; + u32 reg11; + u32 reg12; + u32 reg13; + u32 reg14; + u32 reg15; + u32 reg16; + u32 reg17; + u32 reg18; + u32 reg19; + u32 reg1a; + u32 reg1b; + u32 reg1c; + u32 reg1d; + u32 reg1e; + u32 reg1f; +}; + +/* MAC register Space. Note that some of these registers may or may not be + * present depending upon options chosen by the user when the core was + * configured and built. Please consult the Altera Triple Speed Ethernet User + * Guide for details. + */ +struct altera_tse_mac { + /* Bits 15:0: MegaCore function revision (0x0800). Bit 31:16: Customer + * specific revision + */ + u32 megacore_revision; + /* Provides a memory location for user applications to test the device + * memory operation. + */ + u32 scratch_pad; + /* The host processor uses this register to control and configure the + * MAC block + */ + u32 command_config; + /* 32-bit primary MAC address word 0 bits 0 to 31 of the primary + * MAC address + */ + u32 mac_addr_0; + /* 32-bit primary MAC address word 1 bits 32 to 47 of the primary + * MAC address + */ + u32 mac_addr_1; + /* 14-bit maximum frame length. The MAC receive logic */ + u32 frm_length; + /* The pause quanta is used in each pause frame sent to a remote + * Ethernet device, in increments of 512 Ethernet bit times + */ + u32 pause_quanta; + /* 12-bit receive FIFO section-empty threshold */ + u32 rx_section_empty; + /* 12-bit receive FIFO section-full threshold */ + u32 rx_section_full; + /* 12-bit transmit FIFO section-empty threshold */ + u32 tx_section_empty; + /* 12-bit transmit FIFO section-full threshold */ + u32 tx_section_full; + /* 12-bit receive FIFO almost-empty threshold */ + u32 rx_almost_empty; + /* 12-bit receive FIFO almost-full threshold */ + u32 rx_almost_full; + /* 12-bit transmit FIFO almost-empty threshold */ + u32 tx_almost_empty; + /* 12-bit transmit FIFO almost-full threshold */ + u32 tx_almost_full; + /* MDIO address of PHY Device 0. Bits 0 to 4 hold a 5-bit PHY address */ + u32 mdio_phy0_addr; + /* MDIO address of PHY Device 1. 
Bits 0 to 4 hold a 5-bit PHY address */ + u32 mdio_phy1_addr; + + /* Bit[15:0]—16-bit holdoff quanta */ + u32 holdoff_quant; + + /* only if 100/1000 BaseX PCS, reserved otherwise */ + u32 reserved1[5]; + + /* Minimum IPG between consecutive transmit frame in terms of bytes */ + u32 tx_ipg_length; + + /* IEEE 802.3 oEntity Managed Object Support */ + + /* The MAC addresses */ + u32 mac_id_1; + u32 mac_id_2; + + /* Number of frames transmitted without error including pause frames */ + u32 frames_transmitted_ok; + /* Number of frames received without error including pause frames */ + u32 frames_received_ok; + /* Number of frames received with a CRC error */ + u32 frames_check_sequence_errors; + /* Frame received with an alignment error */ + u32 alignment_errors; + /* Sum of payload and padding octets of frames transmitted without + * error + */ + u32 octets_transmitted_ok; + /* Sum of payload and padding octets of frames received without error */ + u32 octets_received_ok; + + /* IEEE 802.3 oPausedEntity Managed Object Support */ + + /* Number of transmitted pause frames */ + u32 tx_pause_mac_ctrl_frames; + /* Number of Received pause frames */ + u32 rx_pause_mac_ctrl_frames; + + /* IETF MIB (MIB-II) Object Support */ + + /* Number of frames received with error */ + u32 if_in_errors; + /* Number of frames transmitted with error */ + u32 if_out_errors; + /* Number of valid received unicast frames */ + u32 if_in_ucast_pkts; + /* Number of valid received multicasts frames (without pause) */ + u32 if_in_multicast_pkts; + /* Number of valid received broadcast frames */ + u32 if_in_broadcast_pkts; + u32 if_out_discards; + /* The number of valid unicast frames transmitted */ + u32 if_out_ucast_pkts; + /* The number of valid multicast frames transmitted, + * excluding pause frames + */ + u32 if_out_multicast_pkts; + u32 if_out_broadcast_pkts; + + /* IETF RMON MIB Object Support */ + + /* Counts the number of dropped packets due to internal errors + * of the MAC client. + */ + u32 ether_stats_drop_events; + /* Total number of bytes received. Good and bad frames. */ + u32 ether_stats_octets; + /* Total number of packets received. Counts good and bad packets. */ + u32 ether_stats_pkts; + /* Number of packets received with less than 64 bytes. 
*/ + u32 ether_stats_undersize_pkts; + /* The number of frames received that are longer than the + * value configured in the frm_length register + */ + u32 ether_stats_oversize_pkts; + /* Number of received packet with 64 bytes */ + u32 ether_stats_pkts_64_octets; + /* Frames (good and bad) with 65 to 127 bytes */ + u32 ether_stats_pkts_65to127_octets; + /* Frames (good and bad) with 128 to 255 bytes */ + u32 ether_stats_pkts_128to255_octets; + /* Frames (good and bad) with 256 to 511 bytes */ + u32 ether_stats_pkts_256to511_octets; + /* Frames (good and bad) with 512 to 1023 bytes */ + u32 ether_stats_pkts_512to1023_octets; + /* Frames (good and bad) with 1024 to 1518 bytes */ + u32 ether_stats_pkts_1024to1518_octets; + + /* Any frame length from 1519 to the maximum length configured in the + * frm_length register, if it is greater than 1518 + */ + u32 ether_stats_pkts_1519tox_octets; + /* Too long frames with CRC error */ + u32 ether_stats_jabbers; + /* Too short frames with CRC error */ + u32 ether_stats_fragments; + + u32 reserved2; + + /* FIFO control register */ + u32 tx_cmd_stat; + u32 rx_cmd_stat; + + /* Extended Statistics Counters */ + u32 msb_octets_transmitted_ok; + u32 msb_octets_received_ok; + u32 msb_ether_stats_octets; + + u32 reserved3; + + /* Multicast address resolution table, mapped in the controller address + * space + */ + u32 hash_table[64]; + + /* Registers 0 to 31 within PHY device 0/1 connected to the MDIO PHY + * management interface + */ + struct altera_tse_mdio mdio_phy0; + struct altera_tse_mdio mdio_phy1; + + /* 4 Supplemental MAC Addresses */ + u32 supp_mac_addr_0_0; + u32 supp_mac_addr_0_1; + u32 supp_mac_addr_1_0; + u32 supp_mac_addr_1_1; + u32 supp_mac_addr_2_0; + u32 supp_mac_addr_2_1; + u32 supp_mac_addr_3_0; + u32 supp_mac_addr_3_1; + + u32 reserved4[8]; + + /* IEEE 1588v2 Feature */ + u32 tx_period; + u32 tx_adjust_fns; + u32 tx_adjust_ns; + u32 rx_period; + u32 rx_adjust_fns; + u32 rx_adjust_ns; + + u32 reserved5[42]; +}; + +/* Transmit and Receive Command Registers Bit Definitions + */ +#define ALTERA_TSE_TX_CMD_STAT_OMIT_CRC BIT(17) +#define ALTERA_TSE_TX_CMD_STAT_TX_SHIFT16 BIT(18) +#define ALTERA_TSE_RX_CMD_STAT_RX_SHIFT16 BIT(25) + +/* Wrapper around a pointer to a socket buffer, + * so a DMA handle can be stored along with the buffer + */ +struct tse_buffer { + struct list_head lh; + struct sk_buff *skb; + dma_addr_t dma_addr; + u32 len; + int mapped_as_page; +}; + +struct altera_tse_private; + +#define ALTERA_DTYPE_SGDMA 1 +#define ALTERA_DTYPE_MSGDMA 2 + +/* standard DMA interface for SGDMA and MSGDMA */ +struct altera_dmaops { + int altera_dtype; + int dmamask; + void (*reset_dma)(struct altera_tse_private *); + void (*enable_txirq)(struct altera_tse_private *); + void (*enable_rxirq)(struct altera_tse_private *); + void (*disable_txirq)(struct altera_tse_private *); + void (*disable_rxirq)(struct altera_tse_private *); + void (*clear_txirq)(struct altera_tse_private *); + void (*clear_rxirq)(struct altera_tse_private *); + int (*tx_buffer)(struct altera_tse_private *, struct tse_buffer *); + u32 (*tx_completions)(struct altera_tse_private *); + int (*add_rx_desc)(struct altera_tse_private *, struct tse_buffer *); + u32 (*get_rx_status)(struct altera_tse_private *); + int (*init_dma)(struct altera_tse_private *); + void (*uninit_dma)(struct altera_tse_private *); +}; + +/* This structure is private to each device. 
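+ * It carries the register windows, the rx/tx rings and the dmaops
+ * dispatch table; a DMA backend is selected by pointing ->dmaops at a
+ * filled-in struct altera_dmaops, e.g. (hypothetical sketch for the
+ * SGDMA case, wired to the helpers declared in altera_sgdma.h):
+ *
+ *	static struct altera_dmaops sgdma_ops = {
+ *		.altera_dtype	= ALTERA_DTYPE_SGDMA,
+ *		.reset_dma	= sgdma_reset,
+ *		.tx_buffer	= sgdma_tx_buffer,
+ *		.get_rx_status	= sgdma_rx_status,
+ *	};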
+ */ +struct altera_tse_private { + struct net_device *dev; + struct device *device; + struct napi_struct napi; + + /* MAC address space */ + struct altera_tse_mac __iomem *mac_dev; + + /* TSE Revision */ + u32 revision; + + /* mSGDMA Rx Dispatcher address space */ + void __iomem *rx_dma_csr; + void __iomem *rx_dma_desc; + void __iomem *rx_dma_resp; + + /* mSGDMA Tx Dispatcher address space */ + void __iomem *tx_dma_csr; + void __iomem *tx_dma_desc; + + /* Rx buffers queue */ + struct tse_buffer *rx_ring; + u32 rx_cons; + u32 rx_prod; + u32 rx_ring_size; + u32 rx_dma_buf_sz; + + /* Tx ring buffer */ + struct tse_buffer *tx_ring; + u32 tx_prod; + u32 tx_cons; + u32 tx_ring_size; + + /* Interrupts */ + u32 tx_irq; + u32 rx_irq; + + /* RX/TX MAC FIFO configs */ + u32 tx_fifo_depth; + u32 rx_fifo_depth; + u32 max_mtu; + + /* Hash filter settings */ + u32 hash_filter; + u32 added_unicast; + + /* Descriptor memory info for managing SGDMA */ + u32 txdescmem; + u32 rxdescmem; + dma_addr_t rxdescmem_busaddr; + dma_addr_t txdescmem_busaddr; + u32 txctrlreg; + u32 rxctrlreg; + dma_addr_t rxdescphys; + dma_addr_t txdescphys; + + struct list_head txlisthd; + struct list_head rxlisthd; + + /* MAC command_config register protection */ + spinlock_t mac_cfg_lock; + /* Tx path protection */ + spinlock_t tx_lock; + /* Rx DMA & interrupt control protection */ + spinlock_t rxdma_irq_lock; + + /* PHY */ + int phy_addr; /* PHY's MDIO address, -1 for autodetection */ + phy_interface_t phy_iface; + struct mii_bus *mdio; + struct phy_device *phydev; + int oldspeed; + int oldduplex; + int oldlink; + + /* ethtool msglvl option */ + u32 msg_enable; + + struct altera_dmaops *dmaops; +}; + +/* Function prototypes + */ +void altera_tse_set_ethtool_ops(struct net_device *); + +#endif /* __ALTERA_TSE_H__ */ diff --git a/drivers/net/ethernet/altera/altera_tse_ethtool.c b/drivers/net/ethernet/altera/altera_tse_ethtool.c new file mode 100644 index 000000000000..319ca74f5e74 --- /dev/null +++ b/drivers/net/ethernet/altera/altera_tse_ethtool.c @@ -0,0 +1,235 @@ +/* Ethtool support for Altera Triple-Speed Ethernet MAC driver + * Copyright (C) 2008-2014 Altera Corporation. All rights reserved + * + * Contributors: + * Dalon Westergreen + * Thomas Chou + * Ian Abbott + * Yuriy Kozlov + * Tobias Klauser + * Andriy Smolskyy + * Roman Bulgakov + * Dmytro Mytarchuk + * + * Original driver contributed by SLS. + * Major updates contributed by GlobalLogic + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ + +#include <linux/ethtool.h> +#include <linux/kernel.h> +#include <linux/netdevice.h> +#include <linux/phy.h> + +#include "altera_tse.h" + +#define TSE_STATS_LEN 31 +#define TSE_NUM_REGS 128 + +static char const stat_gstrings[][ETH_GSTRING_LEN] = { + "tx_packets", + "rx_packets", + "rx_crc_errors", + "rx_align_errors", + "tx_bytes", + "rx_bytes", + "tx_pause", + "rx_pause", + "rx_errors", + "tx_errors", + "rx_unicast", + "rx_multicast", + "rx_broadcast", + "tx_discards", + "tx_unicast", + "tx_multicast", + "tx_broadcast", + "ether_drops", + "rx_total_bytes", + "rx_total_packets", + "rx_undersize", + "rx_oversize", + "rx_64_bytes", + "rx_65_127_bytes", + "rx_128_255_bytes", + "rx_256_511_bytes", + "rx_512_1023_bytes", + "rx_1024_1518_bytes", + "rx_gte_1519_bytes", + "rx_jabbers", + "rx_runts", +}; + +static void tse_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + struct altera_tse_private *priv = netdev_priv(dev); + u32 rev = ioread32(&priv->mac_dev->megacore_revision); + + strcpy(info->driver, "Altera TSE MAC IP Driver"); + strcpy(info->version, "v8.0"); + snprintf(info->fw_version, ETHTOOL_FWVERS_LEN, "v%d.%d", + rev & 0xFFFF, (rev & 0xFFFF0000) >> 16); + sprintf(info->bus_info, "platform"); +} + +/* Fill in a buffer with the strings which correspond to the + * stats + */ +static void tse_gstrings(struct net_device *dev, u32 stringset, u8 *buf) +{ + memcpy(buf, stat_gstrings, TSE_STATS_LEN * ETH_GSTRING_LEN); +} + +static void tse_fill_stats(struct net_device *dev, struct ethtool_stats *dummy, + u64 *buf) +{ + struct altera_tse_private *priv = netdev_priv(dev); + struct altera_tse_mac *mac = priv->mac_dev; + u64 ext; + + buf[0] = ioread32(&mac->frames_transmitted_ok); + buf[1] = ioread32(&mac->frames_received_ok); + buf[2] = ioread32(&mac->frames_check_sequence_errors); + buf[3] = ioread32(&mac->alignment_errors); + + /* Extended aOctetsTransmittedOK counter */ + ext = (u64) ioread32(&mac->msb_octets_transmitted_ok) << 32; + ext |= ioread32(&mac->octets_transmitted_ok); + buf[4] = ext; + + /* Extended aOctetsReceivedOK counter */ + ext = (u64) ioread32(&mac->msb_octets_received_ok) << 32; + ext |= ioread32(&mac->octets_received_ok); + buf[5] = ext; + + buf[6] = ioread32(&mac->tx_pause_mac_ctrl_frames); + buf[7] = ioread32(&mac->rx_pause_mac_ctrl_frames); + buf[8] = ioread32(&mac->if_in_errors); + buf[9] = ioread32(&mac->if_out_errors); + buf[10] = ioread32(&mac->if_in_ucast_pkts); + buf[11] = ioread32(&mac->if_in_multicast_pkts); + buf[12] = ioread32(&mac->if_in_broadcast_pkts); + buf[13] = ioread32(&mac->if_out_discards); + buf[14] = ioread32(&mac->if_out_ucast_pkts); + buf[15] = ioread32(&mac->if_out_multicast_pkts); + buf[16] = ioread32(&mac->if_out_broadcast_pkts); + buf[17] = ioread32(&mac->ether_stats_drop_events); + + /* Extended etherStatsOctets counter */ + ext = (u64) ioread32(&mac->msb_ether_stats_octets) << 32; + ext |= ioread32(&mac->ether_stats_octets); + buf[18] = ext; + + buf[19] = ioread32(&mac->ether_stats_pkts); + buf[20] = ioread32(&mac->ether_stats_undersize_pkts); + buf[21] = ioread32(&mac->ether_stats_oversize_pkts); + buf[22] = ioread32(&mac->ether_stats_pkts_64_octets); + buf[23] = ioread32(&mac->ether_stats_pkts_65to127_octets); + buf[24] = ioread32(&mac->ether_stats_pkts_128to255_octets); + buf[25] = ioread32(&mac->ether_stats_pkts_256to511_octets); + buf[26] = ioread32(&mac->ether_stats_pkts_512to1023_octets); + buf[27] = ioread32(&mac->ether_stats_pkts_1024to1518_octets); + buf[28] = 
ioread32(&mac->ether_stats_pkts_1519tox_octets); + buf[29] = ioread32(&mac->ether_stats_jabbers); + buf[30] = ioread32(&mac->ether_stats_fragments); +} + +static int tse_sset_count(struct net_device *dev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return TSE_STATS_LEN; + default: + return -EOPNOTSUPP; + } +} + +static u32 tse_get_msglevel(struct net_device *dev) +{ + struct altera_tse_private *priv = netdev_priv(dev); + return priv->msg_enable; +} + +static void tse_set_msglevel(struct net_device *dev, uint32_t data) +{ + struct altera_tse_private *priv = netdev_priv(dev); + priv->msg_enable = data; +} + +static int tse_reglen(struct net_device *dev) +{ + return TSE_NUM_REGS * sizeof(u32); +} + +static void tse_get_regs(struct net_device *dev, struct ethtool_regs *regs, + void *regbuf) +{ + int i; + struct altera_tse_private *priv = netdev_priv(dev); + u32 *tse_mac_regs = (u32 *)priv->mac_dev; + u32 *buf = regbuf; + + /* Set version to a known value, so ethtool knows + * how to do any special formatting of this data. + * This version number will need to change if and + * when this register table is changed. + */ + + regs->version = 1; + + for (i = 0; i < TSE_NUM_REGS; i++) + buf[i] = ioread32(&tse_mac_regs[i]); +} + +static int tse_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct altera_tse_private *priv = netdev_priv(dev); + struct phy_device *phydev = priv->phydev; + + if (phydev == NULL) + return -ENODEV; + + return phy_ethtool_gset(phydev, cmd); +} + +static int tse_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct altera_tse_private *priv = netdev_priv(dev); + struct phy_device *phydev = priv->phydev; + + if (phydev == NULL) + return -ENODEV; + + return phy_ethtool_sset(phydev, cmd); +} + +static const struct ethtool_ops tse_ethtool_ops = { + .get_drvinfo = tse_get_drvinfo, + .get_regs_len = tse_reglen, + .get_regs = tse_get_regs, + .get_link = ethtool_op_get_link, + .get_settings = tse_get_settings, + .set_settings = tse_set_settings, + .get_strings = tse_gstrings, + .get_sset_count = tse_sset_count, + .get_ethtool_stats = tse_fill_stats, + .get_msglevel = tse_get_msglevel, + .set_msglevel = tse_set_msglevel, +}; + +void altera_tse_set_ethtool_ops(struct net_device *netdev) +{ + SET_ETHTOOL_OPS(netdev, &tse_ethtool_ops); +} diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c new file mode 100644 index 000000000000..c70a29e0b9f7 --- /dev/null +++ b/drivers/net/ethernet/altera/altera_tse_main.c @@ -0,0 +1,1543 @@ +/* Altera Triple-Speed Ethernet MAC driver + * Copyright (C) 2008-2014 Altera Corporation. All rights reserved + * + * Contributors: + * Dalon Westergreen + * Thomas Chou + * Ian Abbott + * Yuriy Kozlov + * Tobias Klauser + * Andriy Smolskyy + * Roman Bulgakov + * Dmytro Mytarchuk + * Matthew Gerlach + * + * Original driver contributed by SLS. + * Major updates contributed by GlobalLogic + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. 
If not, see <http://www.gnu.org/licenses/>. + */ + +#include <linux/atomic.h> +#include <linux/delay.h> +#include <linux/etherdevice.h> +#include <linux/if_vlan.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/netdevice.h> +#include <linux/of_device.h> +#include <linux/of_mdio.h> +#include <linux/of_net.h> +#include <linux/of_platform.h> +#include <linux/phy.h> +#include <linux/platform_device.h> +#include <linux/skbuff.h> +#include <asm/cacheflush.h> + +#include "altera_utils.h" +#include "altera_tse.h" +#include "altera_sgdma.h" +#include "altera_msgdma.h" + +static atomic_t instance_count = ATOMIC_INIT(~0); +/* Module parameters */ +static int debug = -1; +module_param(debug, int, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)"); + +static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE | + NETIF_MSG_LINK | NETIF_MSG_IFUP | + NETIF_MSG_IFDOWN); + +#define RX_DESCRIPTORS 64 +static int dma_rx_num = RX_DESCRIPTORS; +module_param(dma_rx_num, int, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(dma_rx_num, "Number of descriptors in the RX list"); + +#define TX_DESCRIPTORS 64 +static int dma_tx_num = TX_DESCRIPTORS; +module_param(dma_tx_num, int, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(dma_tx_num, "Number of descriptors in the TX list"); + + +#define POLL_PHY (-1) + +/* Make sure DMA buffer size is larger than the max frame size + * plus some alignment offset and a VLAN header. If the max frame size is + * 1518, a VLAN header would be additional 4 bytes and additional + * headroom for alignment is 2 bytes, 2048 is just fine. + */ +#define ALTERA_RXDMABUFFER_SIZE 2048 + +/* Allow network stack to resume queueing packets after we've + * finished transmitting at least 1/4 of the packets in the queue. 
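+ *
+ * With the default TX_DESCRIPTORS of 64 this works out to:
+ *
+ *	TSE_TX_THRESH(priv) = tx_ring_size / 4 = 64 / 4 = 16 free slots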
+ */ +#define TSE_TX_THRESH(x) (x->tx_ring_size / 4) + +#define TXQUEUESTOP_THRESHHOLD 2 + +static struct of_device_id altera_tse_ids[]; + +static inline u32 tse_tx_avail(struct altera_tse_private *priv) +{ + return priv->tx_cons + priv->tx_ring_size - priv->tx_prod - 1; +} + +/* MDIO specific functions + */ +static int altera_tse_mdio_read(struct mii_bus *bus, int mii_id, int regnum) +{ + struct altera_tse_mac *mac = (struct altera_tse_mac *)bus->priv; + unsigned int *mdio_regs = (unsigned int *)&mac->mdio_phy0; + u32 data; + + /* set MDIO address */ + iowrite32((mii_id & 0x1f), &mac->mdio_phy0_addr); + + /* get the data */ + data = ioread32(&mdio_regs[regnum]) & 0xffff; + return data; +} + +static int altera_tse_mdio_write(struct mii_bus *bus, int mii_id, int regnum, + u16 value) +{ + struct altera_tse_mac *mac = (struct altera_tse_mac *)bus->priv; + unsigned int *mdio_regs = (unsigned int *)&mac->mdio_phy0; + + /* set MDIO address */ + iowrite32((mii_id & 0x1f), &mac->mdio_phy0_addr); + + /* write the data */ + iowrite32((u32) value, &mdio_regs[regnum]); + return 0; +} + +static int altera_tse_mdio_create(struct net_device *dev, unsigned int id) +{ + struct altera_tse_private *priv = netdev_priv(dev); + int ret; + int i; + struct device_node *mdio_node = NULL; + struct mii_bus *mdio = NULL; + struct device_node *child_node = NULL; + + for_each_child_of_node(priv->device->of_node, child_node) { + if (of_device_is_compatible(child_node, "altr,tse-mdio")) { + mdio_node = child_node; + break; + } + } + + if (mdio_node) { + netdev_dbg(dev, "FOUND MDIO subnode\n"); + } else { + netdev_dbg(dev, "NO MDIO subnode\n"); + return 0; + } + + mdio = mdiobus_alloc(); + if (mdio == NULL) { + netdev_err(dev, "Error allocating MDIO bus\n"); + return -ENOMEM; + } + + mdio->name = ALTERA_TSE_RESOURCE_NAME; + mdio->read = &altera_tse_mdio_read; + mdio->write = &altera_tse_mdio_write; + snprintf(mdio->id, MII_BUS_ID_SIZE, "%s-%u", mdio->name, id); + + mdio->irq = kcalloc(PHY_MAX_ADDR, sizeof(int), GFP_KERNEL); + if (mdio->irq == NULL) { + ret = -ENOMEM; + goto out_free_mdio; + } + for (i = 0; i < PHY_MAX_ADDR; i++) + mdio->irq[i] = PHY_POLL; + + mdio->priv = priv->mac_dev; + mdio->parent = priv->device; + + ret = of_mdiobus_register(mdio, mdio_node); + if (ret != 0) { + netdev_err(dev, "Cannot register MDIO bus %s\n", + mdio->id); + goto out_free_mdio_irq; + } + + if (netif_msg_drv(priv)) + netdev_info(dev, "MDIO bus %s: created\n", mdio->id); + + priv->mdio = mdio; + return 0; +out_free_mdio_irq: + kfree(mdio->irq); +out_free_mdio: + mdiobus_free(mdio); + mdio = NULL; + return ret; +} + +static void altera_tse_mdio_destroy(struct net_device *dev) +{ + struct altera_tse_private *priv = netdev_priv(dev); + + if (priv->mdio == NULL) + return; + + if (netif_msg_drv(priv)) + netdev_info(dev, "MDIO bus %s: removed\n", + priv->mdio->id); + + mdiobus_unregister(priv->mdio); + kfree(priv->mdio->irq); + mdiobus_free(priv->mdio); + priv->mdio = NULL; +} + +static int tse_init_rx_buffer(struct altera_tse_private *priv, + struct tse_buffer *rxbuffer, int len) +{ + rxbuffer->skb = netdev_alloc_skb_ip_align(priv->dev, len); + if (!rxbuffer->skb) + return -ENOMEM; + + rxbuffer->dma_addr = dma_map_single(priv->device, rxbuffer->skb->data, + len, + DMA_FROM_DEVICE); + + if (dma_mapping_error(priv->device, rxbuffer->dma_addr)) { + netdev_err(priv->dev, "%s: DMA mapping error\n", __func__); + dev_kfree_skb_any(rxbuffer->skb); + return -EINVAL; + } + rxbuffer->len = len; + return 0; +} + +static void tse_free_rx_buffer(struct 
altera_tse_private *priv, + struct tse_buffer *rxbuffer) +{ + struct sk_buff *skb = rxbuffer->skb; + dma_addr_t dma_addr = rxbuffer->dma_addr; + + if (skb != NULL) { + if (dma_addr) + dma_unmap_single(priv->device, dma_addr, + rxbuffer->len, + DMA_FROM_DEVICE); + dev_kfree_skb_any(skb); + rxbuffer->skb = NULL; + rxbuffer->dma_addr = 0; + } +} + +/* Unmap and free Tx buffer resources + */ +static void tse_free_tx_buffer(struct altera_tse_private *priv, + struct tse_buffer *buffer) +{ + if (buffer->dma_addr) { + if (buffer->mapped_as_page) + dma_unmap_page(priv->device, buffer->dma_addr, + buffer->len, DMA_TO_DEVICE); + else + dma_unmap_single(priv->device, buffer->dma_addr, + buffer->len, DMA_TO_DEVICE); + buffer->dma_addr = 0; + } + if (buffer->skb) { + dev_kfree_skb_any(buffer->skb); + buffer->skb = NULL; + } +} + +static int alloc_init_skbufs(struct altera_tse_private *priv) +{ + unsigned int rx_descs = priv->rx_ring_size; + unsigned int tx_descs = priv->tx_ring_size; + int ret = -ENOMEM; + int i; + + /* Create Rx ring buffer */ + priv->rx_ring = kcalloc(rx_descs, sizeof(struct tse_buffer), + GFP_KERNEL); + if (!priv->rx_ring) + goto err_rx_ring; + + /* Create Tx ring buffer */ + priv->tx_ring = kcalloc(tx_descs, sizeof(struct tse_buffer), + GFP_KERNEL); + if (!priv->tx_ring) + goto err_tx_ring; + + priv->tx_cons = 0; + priv->tx_prod = 0; + + /* Init Rx ring */ + for (i = 0; i < rx_descs; i++) { + ret = tse_init_rx_buffer(priv, &priv->rx_ring[i], + priv->rx_dma_buf_sz); + if (ret) + goto err_init_rx_buffers; + } + + priv->rx_cons = 0; + priv->rx_prod = 0; + + return 0; +err_init_rx_buffers: + while (--i >= 0) + tse_free_rx_buffer(priv, &priv->rx_ring[i]); + kfree(priv->tx_ring); +err_tx_ring: + kfree(priv->rx_ring); +err_rx_ring: + return ret; +} + +static void free_skbufs(struct net_device *dev) +{ + struct altera_tse_private *priv = netdev_priv(dev); + unsigned int rx_descs = priv->rx_ring_size; + unsigned int tx_descs = priv->tx_ring_size; + int i; + + /* Release the DMA TX/RX socket buffers */ + for (i = 0; i < rx_descs; i++) + tse_free_rx_buffer(priv, &priv->rx_ring[i]); + for (i = 0; i < tx_descs; i++) + tse_free_tx_buffer(priv, &priv->tx_ring[i]); + + + kfree(priv->tx_ring); +} + +/* Reallocate the skb for the reception process + */ +static inline void tse_rx_refill(struct altera_tse_private *priv) +{ + unsigned int rxsize = priv->rx_ring_size; + unsigned int entry; + int ret; + + for (; priv->rx_cons - priv->rx_prod > 0; + priv->rx_prod++) { + entry = priv->rx_prod % rxsize; + if (likely(priv->rx_ring[entry].skb == NULL)) { + ret = tse_init_rx_buffer(priv, &priv->rx_ring[entry], + priv->rx_dma_buf_sz); + if (unlikely(ret != 0)) + break; + priv->dmaops->add_rx_desc(priv, &priv->rx_ring[entry]); + } + } +} + +/* Pull out the VLAN tag and fix up the packet + */ +static inline void tse_rx_vlan(struct net_device *dev, struct sk_buff *skb) +{ + struct ethhdr *eth_hdr; + u16 vid; + if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) && + !__vlan_get_tag(skb, &vid)) { + eth_hdr = (struct ethhdr *)skb->data; + memmove(skb->data + VLAN_HLEN, eth_hdr, ETH_ALEN * 2); + skb_pull(skb, VLAN_HLEN); + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); + } +} + +/* Receive a packet: retrieve and pass over to upper levels + */ +static int tse_rx(struct altera_tse_private *priv, int limit) +{ + unsigned int count = 0; + unsigned int next_entry; + struct sk_buff *skb; + unsigned int entry = priv->rx_cons % priv->rx_ring_size; + u32 rxstatus; + u16 pktlength; + u16 pktstatus; + + while ((rxstatus = 
priv->dmaops->get_rx_status(priv)) != 0) { + pktstatus = rxstatus >> 16; + pktlength = rxstatus & 0xffff; + + if ((pktstatus & 0xFF) || (pktlength == 0)) + netdev_err(priv->dev, + "RCV pktstatus %08X pktlength %08X\n", + pktstatus, pktlength); + + count++; + next_entry = (++priv->rx_cons) % priv->rx_ring_size; + + skb = priv->rx_ring[entry].skb; + if (unlikely(!skb)) { + netdev_err(priv->dev, + "%s: Inconsistent Rx descriptor chain\n", + __func__); + priv->dev->stats.rx_dropped++; + break; + } + priv->rx_ring[entry].skb = NULL; + + skb_put(skb, pktlength); + + /* make cache consistent with receive packet buffer */ + dma_sync_single_for_cpu(priv->device, + priv->rx_ring[entry].dma_addr, + priv->rx_ring[entry].len, + DMA_FROM_DEVICE); + + dma_unmap_single(priv->device, priv->rx_ring[entry].dma_addr, + priv->rx_ring[entry].len, DMA_FROM_DEVICE); + + if (netif_msg_pktdata(priv)) { + netdev_info(priv->dev, "frame received %d bytes\n", + pktlength); + print_hex_dump(KERN_ERR, "data: ", DUMP_PREFIX_OFFSET, + 16, 1, skb->data, pktlength, true); + } + + tse_rx_vlan(priv->dev, skb); + + skb->protocol = eth_type_trans(skb, priv->dev); + skb_checksum_none_assert(skb); + + napi_gro_receive(&priv->napi, skb); + + priv->dev->stats.rx_packets++; + priv->dev->stats.rx_bytes += pktlength; + + entry = next_entry; + } + + tse_rx_refill(priv); + return count; +} + +/* Reclaim resources after transmission completes + */ +static int tse_tx_complete(struct altera_tse_private *priv) +{ + unsigned int txsize = priv->tx_ring_size; + u32 ready; + unsigned int entry; + struct tse_buffer *tx_buff; + int txcomplete = 0; + + spin_lock(&priv->tx_lock); + + ready = priv->dmaops->tx_completions(priv); + + /* Free sent buffers */ + while (ready && (priv->tx_cons != priv->tx_prod)) { + entry = priv->tx_cons % txsize; + tx_buff = &priv->tx_ring[entry]; + + if (netif_msg_tx_done(priv)) + netdev_dbg(priv->dev, "%s: curr %d, dirty %d\n", + __func__, priv->tx_prod, priv->tx_cons); + + if (likely(tx_buff->skb)) + priv->dev->stats.tx_packets++; + + tse_free_tx_buffer(priv, tx_buff); + priv->tx_cons++; + + txcomplete++; + ready--; + } + + if (unlikely(netif_queue_stopped(priv->dev) && + tse_tx_avail(priv) > TSE_TX_THRESH(priv))) { + netif_tx_lock(priv->dev); + if (netif_queue_stopped(priv->dev) && + tse_tx_avail(priv) > TSE_TX_THRESH(priv)) { + if (netif_msg_tx_done(priv)) + netdev_dbg(priv->dev, "%s: restart transmit\n", + __func__); + netif_wake_queue(priv->dev); + } + netif_tx_unlock(priv->dev); + } + + spin_unlock(&priv->tx_lock); + return txcomplete; +} + +/* NAPI polling function + */ +static int tse_poll(struct napi_struct *napi, int budget) +{ + struct altera_tse_private *priv = + container_of(napi, struct altera_tse_private, napi); + int rxcomplete = 0; + int txcomplete = 0; + unsigned long int flags; + + txcomplete = tse_tx_complete(priv); + + rxcomplete = tse_rx(priv, budget); + + if (rxcomplete >= budget || txcomplete > 0) + return rxcomplete; + + napi_gro_flush(napi, false); + __napi_complete(napi); + + netdev_dbg(priv->dev, + "NAPI Complete, did %d packets with budget %d\n", + txcomplete+rxcomplete, budget); + + spin_lock_irqsave(&priv->rxdma_irq_lock, flags); + priv->dmaops->enable_rxirq(priv); + priv->dmaops->enable_txirq(priv); + spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags); + return rxcomplete + txcomplete; +} + +/* DMA TX & RX FIFO interrupt routing + */ +static irqreturn_t altera_isr(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + struct altera_tse_private *priv; + unsigned long int flags; + 
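+
+	/* Strategy: mask both DMA irq sources here and hand the real
+	 * work to NAPI (tse_poll), which re-enables them once its
+	 * budget is done.
+	 */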
+ + if (unlikely(!dev)) { + pr_err("%s: invalid dev pointer\n", __func__); + return IRQ_NONE; + } + priv = netdev_priv(dev); + + /* turn off desc irqs and enable napi rx */ + spin_lock_irqsave(&priv->rxdma_irq_lock, flags); + + if (likely(napi_schedule_prep(&priv->napi))) { + priv->dmaops->disable_rxirq(priv); + priv->dmaops->disable_txirq(priv); + __napi_schedule(&priv->napi); + } + + /* reset IRQs */ + priv->dmaops->clear_rxirq(priv); + priv->dmaops->clear_txirq(priv); + + spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags); + + return IRQ_HANDLED; +} + +/* Transmit a packet (called by the kernel). Dispatches + * either the SGDMA method for transmitting or the + * MSGDMA method, assumes no scatter/gather support, + * implying an assumption that there's only one + * physically contiguous fragment starting at + * skb->data, for length of skb_headlen(skb). + */ +static int tse_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct altera_tse_private *priv = netdev_priv(dev); + unsigned int txsize = priv->tx_ring_size; + unsigned int entry; + struct tse_buffer *buffer = NULL; + int nfrags = skb_shinfo(skb)->nr_frags; + unsigned int nopaged_len = skb_headlen(skb); + enum netdev_tx ret = NETDEV_TX_OK; + dma_addr_t dma_addr; + int txcomplete = 0; + + spin_lock_bh(&priv->tx_lock); + + if (unlikely(tse_tx_avail(priv) < nfrags + 1)) { + if (!netif_queue_stopped(dev)) { + netif_stop_queue(dev); + /* This is a hard error, log it. */ + netdev_err(priv->dev, + "%s: Tx list full when queue awake\n", + __func__); + } + ret = NETDEV_TX_BUSY; + goto out; + } + + /* Map the first skb fragment */ + entry = priv->tx_prod % txsize; + buffer = &priv->tx_ring[entry]; + + dma_addr = dma_map_single(priv->device, skb->data, nopaged_len, + DMA_TO_DEVICE); + if (dma_mapping_error(priv->device, dma_addr)) { + netdev_err(priv->dev, "%s: DMA mapping error\n", __func__); + ret = NETDEV_TX_OK; + goto out; + } + + buffer->skb = skb; + buffer->dma_addr = dma_addr; + buffer->len = nopaged_len; + + /* Push data out of the cache hierarchy into main memory */ + dma_sync_single_for_device(priv->device, buffer->dma_addr, + buffer->len, DMA_TO_DEVICE); + + txcomplete = priv->dmaops->tx_buffer(priv, buffer); + + skb_tx_timestamp(skb); + + priv->tx_prod++; + dev->stats.tx_bytes += skb->len; + + if (unlikely(tse_tx_avail(priv) <= TXQUEUESTOP_THRESHHOLD)) { + if (netif_msg_hw(priv)) + netdev_dbg(priv->dev, "%s: stop transmitted packets\n", + __func__); + netif_stop_queue(dev); + } + +out: + spin_unlock_bh(&priv->tx_lock); + + return ret; +} + +/* Called every time the controller might need to be made + * aware of new link state. The PHY code conveys this + * information through variables in the phydev structure, and this + * function converts those variables into the appropriate + * register values, and can bring down the device if needed. 
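tse_start_xmit() above distills to a pattern most single-fragment drivers share: check ring space (stopping the queue and returning NETDEV_TX_BUSY only as a should-not-happen fallback), map the head of the skb, drop the packet cleanly if the mapping fails, and stop the queue proactively once free descriptors run low. A sketch under those assumptions; ring_space(), ring_push() and LOW_WATER are illustrative helpers, not driver symbols:

#include <linux/netdevice.h>
#include <linux/dma-mapping.h>

struct my_dev {
	struct device *dma_dev;
	/* TX ring bookkeeping elided */
};

static int ring_space(struct my_dev *md);
static void ring_push(struct my_dev *md, struct sk_buff *skb,
		      dma_addr_t dma, unsigned int len);
#define LOW_WATER 4	/* hypothetical refill threshold */

static netdev_tx_t my_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct my_dev *md = netdev_priv(ndev);
	dma_addr_t dma;

	if (ring_space(md) < 1) {
		netif_stop_queue(ndev);	/* should not normally happen */
		return NETDEV_TX_BUSY;	/* core requeues the skb */
	}

	dma = dma_map_single(md->dma_dev, skb->data, skb_headlen(skb),
			     DMA_TO_DEVICE);
	if (dma_mapping_error(md->dma_dev, dma)) {
		dev_kfree_skb_any(skb);	/* drop; NETDEV_TX_OK consumes it */
		ndev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	ring_push(md, skb, dma, skb_headlen(skb));
	skb_tx_timestamp(skb);

	if (ring_space(md) < LOW_WATER)	/* stop before the ring fills */
		netif_stop_queue(ndev);

	return NETDEV_TX_OK;
}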
+ */ +static void altera_tse_adjust_link(struct net_device *dev) +{ + struct altera_tse_private *priv = netdev_priv(dev); + struct phy_device *phydev = priv->phydev; + int new_state = 0; + + /* only change config if there is a link */ + spin_lock(&priv->mac_cfg_lock); + if (phydev->link) { + /* Read old config */ + u32 cfg_reg = ioread32(&priv->mac_dev->command_config); + + /* Check duplex */ + if (phydev->duplex != priv->oldduplex) { + new_state = 1; + if (!(phydev->duplex)) + cfg_reg |= MAC_CMDCFG_HD_ENA; + else + cfg_reg &= ~MAC_CMDCFG_HD_ENA; + + netdev_dbg(priv->dev, "%s: Link duplex = 0x%x\n", + dev->name, phydev->duplex); + + priv->oldduplex = phydev->duplex; + } + + /* Check speed */ + if (phydev->speed != priv->oldspeed) { + new_state = 1; + switch (phydev->speed) { + case 1000: + cfg_reg |= MAC_CMDCFG_ETH_SPEED; + cfg_reg &= ~MAC_CMDCFG_ENA_10; + break; + case 100: + cfg_reg &= ~MAC_CMDCFG_ETH_SPEED; + cfg_reg &= ~MAC_CMDCFG_ENA_10; + break; + case 10: + cfg_reg &= ~MAC_CMDCFG_ETH_SPEED; + cfg_reg |= MAC_CMDCFG_ENA_10; + break; + default: + if (netif_msg_link(priv)) + netdev_warn(dev, "Speed (%d) is not 10/100/1000!\n", + phydev->speed); + break; + } + priv->oldspeed = phydev->speed; + } + iowrite32(cfg_reg, &priv->mac_dev->command_config); + + if (!priv->oldlink) { + new_state = 1; + priv->oldlink = 1; + } + } else if (priv->oldlink) { + new_state = 1; + priv->oldlink = 0; + priv->oldspeed = 0; + priv->oldduplex = -1; + } + + if (new_state && netif_msg_link(priv)) + phy_print_status(phydev); + + spin_unlock(&priv->mac_cfg_lock); +} +static struct phy_device *connect_local_phy(struct net_device *dev) +{ + struct altera_tse_private *priv = netdev_priv(dev); + struct phy_device *phydev = NULL; + char phy_id_fmt[MII_BUS_ID_SIZE + 3]; + int ret; + + if (priv->phy_addr != POLL_PHY) { + snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, + priv->mdio->id, priv->phy_addr); + + netdev_dbg(dev, "trying to attach to %s\n", phy_id_fmt); + + phydev = phy_connect(dev, phy_id_fmt, &altera_tse_adjust_link, + priv->phy_iface); + if (IS_ERR(phydev)) + netdev_err(dev, "Could not attach to PHY\n"); + + } else { + phydev = phy_find_first(priv->mdio); + if (phydev == NULL) { + netdev_err(dev, "No PHY found\n"); + return phydev; + } + + ret = phy_connect_direct(dev, phydev, &altera_tse_adjust_link, + priv->phy_iface); + if (ret != 0) { + netdev_err(dev, "Could not attach to PHY\n"); + phydev = NULL; + } + } + return phydev; +} + +/* Initialize driver's PHY state, and attach to the PHY + */ +static int init_phy(struct net_device *dev) +{ + struct altera_tse_private *priv = netdev_priv(dev); + struct phy_device *phydev; + struct device_node *phynode; + + priv->oldlink = 0; + priv->oldspeed = 0; + priv->oldduplex = -1; + + phynode = of_parse_phandle(priv->device->of_node, "phy-handle", 0); + + if (!phynode) { + netdev_dbg(dev, "no phy-handle found\n"); + if (!priv->mdio) { + netdev_err(dev, + "No phy-handle nor local mdio specified\n"); + return -ENODEV; + } + phydev = connect_local_phy(dev); + } else { + netdev_dbg(dev, "phy-handle found\n"); + phydev = of_phy_connect(dev, phynode, + &altera_tse_adjust_link, 0, priv->phy_iface); + } + + if (!phydev) { + netdev_err(dev, "Could not find the PHY\n"); + return -ENODEV; + } + + /* Stop Advertising 1000BASE Capability if interface is not GMII + * Note: Checkpatch throws CHECKs for the camel case defines below, + * it's ok to ignore. 
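altera_tse_adjust_link() above is phylib boilerplate worth seeing in isolation: compare phydev->link/speed/duplex against cached values, rewrite the MAC's configuration register only when something actually changed, and report via phy_print_status(). A condensed sketch; MY_HALF_DUPLEX and the my_dev fields (phydev, cfg_lock, cmd_cfg register pointer, old_link/old_speed/old_duplex cache) are hypothetical:

#include <linux/phy.h>
#include <linux/io.h>

#define MY_HALF_DUPLEX BIT(10)	/* hypothetical command_config bit */

static void my_adjust_link(struct net_device *ndev)
{
	struct my_dev *md = netdev_priv(ndev);
	struct phy_device *phydev = md->phydev;
	bool changed = false;

	spin_lock(&md->cfg_lock);
	if (phydev->link) {
		u32 cfg = ioread32(md->cmd_cfg);

		if (phydev->duplex != md->old_duplex) {
			if (phydev->duplex)
				cfg &= ~MY_HALF_DUPLEX;
			else
				cfg |= MY_HALF_DUPLEX;
			md->old_duplex = phydev->duplex;
			changed = true;
		}
		if (phydev->speed != md->old_speed) {
			/* program the 10/100/1000 speed bits here */
			md->old_speed = phydev->speed;
			changed = true;
		}
		iowrite32(cfg, md->cmd_cfg);
		if (!md->old_link) {
			md->old_link = 1;
			changed = true;
		}
	} else if (md->old_link) {
		md->old_link = 0;
		md->old_speed = 0;
		md->old_duplex = -1;
		changed = true;
	}
	spin_unlock(&md->cfg_lock);

	if (changed)
		phy_print_status(phydev);
}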
+ */ + if ((priv->phy_iface == PHY_INTERFACE_MODE_MII) || + (priv->phy_iface == PHY_INTERFACE_MODE_RMII)) + phydev->advertising &= ~(SUPPORTED_1000baseT_Half | + SUPPORTED_1000baseT_Full); + + /* Broken HW is sometimes missing the pull-up resistor on the + * MDIO line, which results in reads to non-existent devices returning + * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent + * device as well. + * Note: phydev->phy_id is the result of reading the UID PHY registers. + */ + if (phydev->phy_id == 0) { + netdev_err(dev, "Bad PHY UID 0x%08x\n", phydev->phy_id); + phy_disconnect(phydev); + return -ENODEV; + } + + netdev_dbg(dev, "attached to PHY %d UID 0x%08x Link = %d\n", + phydev->addr, phydev->phy_id, phydev->link); + + priv->phydev = phydev; + return 0; +} + +static void tse_update_mac_addr(struct altera_tse_private *priv, u8 *addr) +{ + struct altera_tse_mac *mac = priv->mac_dev; + u32 msb; + u32 lsb; + + msb = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0]; + lsb = ((addr[5] << 8) | addr[4]) & 0xffff; + + /* Set primary MAC address */ + iowrite32(msb, &mac->mac_addr_0); + iowrite32(lsb, &mac->mac_addr_1); +} + +/* MAC software reset. + * When reset is triggered, the MAC function completes the current + * transmission or reception, and subsequently disables the transmit and + * receive logic, flushes the receive FIFO buffer, and resets the statistics + * counters. + */ +static int reset_mac(struct altera_tse_private *priv) +{ + void __iomem *cmd_cfg_reg = &priv->mac_dev->command_config; + int counter; + u32 dat; + + dat = ioread32(cmd_cfg_reg); + dat &= ~(MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA); + dat |= MAC_CMDCFG_SW_RESET | MAC_CMDCFG_CNT_RESET; + iowrite32(dat, cmd_cfg_reg); + + counter = 0; + while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) { + if (tse_bit_is_clear(cmd_cfg_reg, MAC_CMDCFG_SW_RESET)) + break; + udelay(1); + } + + if (counter >= ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) { + dat = ioread32(cmd_cfg_reg); + dat &= ~MAC_CMDCFG_SW_RESET; + iowrite32(dat, cmd_cfg_reg); + return -1; + } + return 0; +} + +/* Initialize MAC core registers +*/ +static int init_mac(struct altera_tse_private *priv) +{ + struct altera_tse_mac *mac = priv->mac_dev; + unsigned int cmd = 0; + u32 frm_length; + + /* Setup Rx FIFO */ + iowrite32(priv->rx_fifo_depth - ALTERA_TSE_RX_SECTION_EMPTY, + &mac->rx_section_empty); + iowrite32(ALTERA_TSE_RX_SECTION_FULL, &mac->rx_section_full); + iowrite32(ALTERA_TSE_RX_ALMOST_EMPTY, &mac->rx_almost_empty); + iowrite32(ALTERA_TSE_RX_ALMOST_FULL, &mac->rx_almost_full); + + /* Setup Tx FIFO */ + iowrite32(priv->tx_fifo_depth - ALTERA_TSE_TX_SECTION_EMPTY, + &mac->tx_section_empty); + iowrite32(ALTERA_TSE_TX_SECTION_FULL, &mac->tx_section_full); + iowrite32(ALTERA_TSE_TX_ALMOST_EMPTY, &mac->tx_almost_empty); + iowrite32(ALTERA_TSE_TX_ALMOST_FULL, &mac->tx_almost_full); + + /* MAC Address Configuration */ + tse_update_mac_addr(priv, priv->dev->dev_addr); + + /* MAC Function Configuration */ + frm_length = ETH_HLEN + priv->dev->mtu + ETH_FCS_LEN; + iowrite32(frm_length, &mac->frm_length); + iowrite32(ALTERA_TSE_TX_IPG_LENGTH, &mac->tx_ipg_length); + + /* Disable RX/TX shift 16 for alignment of all received frames on 16-bit + * start address + */ + tse_clear_bit(&mac->rx_cmd_stat, ALTERA_TSE_RX_CMD_STAT_RX_SHIFT16); + tse_clear_bit(&mac->tx_cmd_stat, ALTERA_TSE_TX_CMD_STAT_TX_SHIFT16 | + ALTERA_TSE_TX_CMD_STAT_OMIT_CRC); + + /* Set the MAC options */ + cmd = ioread32(&mac->command_config); + cmd |= MAC_CMDCFG_PAD_EN; /* Padding Removal on 
Receive */ + cmd &= ~MAC_CMDCFG_CRC_FWD; /* CRC Removal */ + cmd |= MAC_CMDCFG_RX_ERR_DISC; /* Automatically discard frames + * with CRC errors + */ + cmd |= MAC_CMDCFG_CNTL_FRM_ENA; + cmd &= ~MAC_CMDCFG_TX_ENA; + cmd &= ~MAC_CMDCFG_RX_ENA; + iowrite32(cmd, &mac->command_config); + + if (netif_msg_hw(priv)) + dev_dbg(priv->device, + "MAC post-initialization: CMD_CONFIG = 0x%08x\n", cmd); + + return 0; +} + +/* Start/stop MAC transmission logic + */ +static void tse_set_mac(struct altera_tse_private *priv, bool enable) +{ + struct altera_tse_mac *mac = priv->mac_dev; + u32 value = ioread32(&mac->command_config); + + if (enable) + value |= MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA; + else + value &= ~(MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA); + + iowrite32(value, &mac->command_config); +} + +/* Change the MTU + */ +static int tse_change_mtu(struct net_device *dev, int new_mtu) +{ + struct altera_tse_private *priv = netdev_priv(dev); + unsigned int max_mtu = priv->max_mtu; + unsigned int min_mtu = ETH_ZLEN + ETH_FCS_LEN; + + if (netif_running(dev)) { + netdev_err(dev, "must be stopped to change its MTU\n"); + return -EBUSY; + } + + if ((new_mtu < min_mtu) || (new_mtu > max_mtu)) { + netdev_err(dev, "invalid MTU, max MTU is: %u\n", max_mtu); + return -EINVAL; + } + + dev->mtu = new_mtu; + netdev_update_features(dev); + + return 0; +} + +static void altera_tse_set_mcfilter(struct net_device *dev) +{ + struct altera_tse_private *priv = netdev_priv(dev); + struct altera_tse_mac *mac = priv->mac_dev; + int i; + struct netdev_hw_addr *ha; + + /* clear the hash filter */ + for (i = 0; i < 64; i++) + iowrite32(0, &(mac->hash_table[i])); + + netdev_for_each_mc_addr(ha, dev) { + unsigned int hash = 0; + int mac_octet; + + for (mac_octet = 5; mac_octet >= 0; mac_octet--) { + unsigned char xor_bit = 0; + unsigned char octet = ha->addr[mac_octet]; + unsigned int bitshift; + + for (bitshift = 0; bitshift < 8; bitshift++) + xor_bit ^= ((octet >> bitshift) & 0x01); + + hash = (hash << 1) | xor_bit; + } + iowrite32(1, &(mac->hash_table[hash])); + } +} + + +static void altera_tse_set_mcfilterall(struct net_device *dev) +{ + struct altera_tse_private *priv = netdev_priv(dev); + struct altera_tse_mac *mac = priv->mac_dev; + int i; + + /* set the hash filter */ + for (i = 0; i < 64; i++) + iowrite32(1, &(mac->hash_table[i])); +} + +/* Set or clear the multicast filter for this adaptor + */ +static void tse_set_rx_mode_hashfilter(struct net_device *dev) +{ + struct altera_tse_private *priv = netdev_priv(dev); + struct altera_tse_mac *mac = priv->mac_dev; + + spin_lock(&priv->mac_cfg_lock); + + if (dev->flags & IFF_PROMISC) + tse_set_bit(&mac->command_config, MAC_CMDCFG_PROMIS_EN); + + if (dev->flags & IFF_ALLMULTI) + altera_tse_set_mcfilterall(dev); + else + altera_tse_set_mcfilter(dev); + + spin_unlock(&priv->mac_cfg_lock); +} + +/* Set or clear the multicast filter for this adaptor + */ +static void tse_set_rx_mode(struct net_device *dev) +{ + struct altera_tse_private *priv = netdev_priv(dev); + struct altera_tse_mac *mac = priv->mac_dev; + + spin_lock(&priv->mac_cfg_lock); + + if ((dev->flags & IFF_PROMISC) || (dev->flags & IFF_ALLMULTI) || + !netdev_mc_empty(dev) || !netdev_uc_empty(dev)) + tse_set_bit(&mac->command_config, MAC_CMDCFG_PROMIS_EN); + else + tse_clear_bit(&mac->command_config, MAC_CMDCFG_PROMIS_EN); + + spin_unlock(&priv->mac_cfg_lock); +} + +/* Open and initialize the interface + */ +static int tse_open(struct net_device *dev) +{ + struct altera_tse_private *priv = netdev_priv(dev); + int ret = 0; + 
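altera_tse_set_mcfilter() above compresses each multicast address to a 6-bit index: every octet is XOR-reduced to one parity bit, and the six bits are concatenated to select one of the 64 hash_table registers. The computation in isolation, as a user-space-runnable sketch of the same loop (the sample address is only an example):

#include <stdio.h>

/* One XOR-parity bit per address octet, MSB taken from addr[5] down to
 * the LSB from addr[0], exactly as in altera_tse_set_mcfilter() above.
 */
static unsigned int tse_mc_hash(const unsigned char *addr)
{
	unsigned int hash = 0;
	int octet;

	for (octet = 5; octet >= 0; octet--) {
		unsigned char parity = 0;
		int bit;

		for (bit = 0; bit < 8; bit++)
			parity ^= (addr[octet] >> bit) & 0x01;

		hash = (hash << 1) | parity;
	}
	return hash;	/* 0..63, selects one hash_table[] register */
}

int main(void)
{
	const unsigned char mc[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };

	printf("hash index: %u\n", tse_mc_hash(mc));
	return 0;
}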
int i; + unsigned long int flags; + + /* Reset and configure TSE MAC and probe associated PHY */ + ret = priv->dmaops->init_dma(priv); + if (ret != 0) { + netdev_err(dev, "Cannot initialize DMA\n"); + goto phy_error; + } + + if (netif_msg_ifup(priv)) + netdev_warn(dev, "device MAC address %pM\n", + dev->dev_addr); + + if ((priv->revision < 0xd00) || (priv->revision > 0xe00)) + netdev_warn(dev, "TSE revision %x\n", priv->revision); + + spin_lock(&priv->mac_cfg_lock); + ret = reset_mac(priv); + if (ret) + netdev_err(dev, "Cannot reset MAC core (error: %d)\n", ret); + + ret = init_mac(priv); + spin_unlock(&priv->mac_cfg_lock); + if (ret) { + netdev_err(dev, "Cannot init MAC core (error: %d)\n", ret); + goto alloc_skbuf_error; + } + + priv->dmaops->reset_dma(priv); + + /* Create and initialize the TX/RX descriptors chains. */ + priv->rx_ring_size = dma_rx_num; + priv->tx_ring_size = dma_tx_num; + ret = alloc_init_skbufs(priv); + if (ret) { + netdev_err(dev, "DMA descriptors initialization failed\n"); + goto alloc_skbuf_error; + } + + + /* Register RX interrupt */ + ret = request_irq(priv->rx_irq, altera_isr, IRQF_SHARED, + dev->name, dev); + if (ret) { + netdev_err(dev, "Unable to register RX interrupt %d\n", + priv->rx_irq); + goto init_error; + } + + /* Register TX interrupt */ + ret = request_irq(priv->tx_irq, altera_isr, IRQF_SHARED, + dev->name, dev); + if (ret) { + netdev_err(dev, "Unable to register TX interrupt %d\n", + priv->tx_irq); + goto tx_request_irq_error; + } + + /* Enable DMA interrupts */ + spin_lock_irqsave(&priv->rxdma_irq_lock, flags); + priv->dmaops->enable_rxirq(priv); + priv->dmaops->enable_txirq(priv); + + /* Setup RX descriptor chain */ + for (i = 0; i < priv->rx_ring_size; i++) + priv->dmaops->add_rx_desc(priv, &priv->rx_ring[i]); + + spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags); + + /* Start MAC Rx/Tx */ + spin_lock(&priv->mac_cfg_lock); + tse_set_mac(priv, true); + spin_unlock(&priv->mac_cfg_lock); + + if (priv->phydev) + phy_start(priv->phydev); + + napi_enable(&priv->napi); + netif_start_queue(dev); + + return 0; + +tx_request_irq_error: + free_irq(priv->rx_irq, dev); +init_error: + free_skbufs(dev); +alloc_skbuf_error: + if (priv->phydev) { + phy_disconnect(priv->phydev); + priv->phydev = NULL; + } +phy_error: + return ret; +} + +/* Stop TSE MAC interface and put the device in an inactive state + */ +static int tse_shutdown(struct net_device *dev) +{ + struct altera_tse_private *priv = netdev_priv(dev); + int ret; + unsigned long int flags; + + /* Stop and disconnect the PHY */ + if (priv->phydev) { + phy_stop(priv->phydev); + phy_disconnect(priv->phydev); + priv->phydev = NULL; + } + + netif_stop_queue(dev); + napi_disable(&priv->napi); + + /* Disable DMA interrupts */ + spin_lock_irqsave(&priv->rxdma_irq_lock, flags); + priv->dmaops->disable_rxirq(priv); + priv->dmaops->disable_txirq(priv); + spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags); + + /* Free the IRQ lines */ + free_irq(priv->rx_irq, dev); + free_irq(priv->tx_irq, dev); + + /* disable and reset the MAC, empties fifo */ + spin_lock(&priv->mac_cfg_lock); + spin_lock(&priv->tx_lock); + + ret = reset_mac(priv); + if (ret) + netdev_err(dev, "Cannot reset MAC core (error: %d)\n", ret); + priv->dmaops->reset_dma(priv); + free_skbufs(dev); + + spin_unlock(&priv->tx_lock); + spin_unlock(&priv->mac_cfg_lock); + + priv->dmaops->uninit_dma(priv); + + return 0; +} + +static struct net_device_ops altera_tse_netdev_ops = { + .ndo_open = tse_open, + .ndo_stop = tse_shutdown, + .ndo_start_xmit = 
tse_start_xmit, + .ndo_set_mac_address = eth_mac_addr, + .ndo_set_rx_mode = tse_set_rx_mode, + .ndo_change_mtu = tse_change_mtu, + .ndo_validate_addr = eth_validate_addr, +}; + + +static int request_and_map(struct platform_device *pdev, const char *name, + struct resource **res, void __iomem **ptr) +{ + struct resource *region; + struct device *device = &pdev->dev; + + *res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name); + if (*res == NULL) { + dev_err(device, "resource %s not defined\n", name); + return -ENODEV; + } + + region = devm_request_mem_region(device, (*res)->start, + resource_size(*res), dev_name(device)); + if (region == NULL) { + dev_err(device, "unable to request %s\n", name); + return -EBUSY; + } + + *ptr = devm_ioremap_nocache(device, region->start, + resource_size(region)); + if (*ptr == NULL) { + dev_err(device, "ioremap_nocache of %s failed!", name); + return -ENOMEM; + } + + return 0; +} + +/* Probe Altera TSE MAC device + */ +static int altera_tse_probe(struct platform_device *pdev) +{ + struct net_device *ndev; + int ret = -ENODEV; + struct resource *control_port; + struct resource *dma_res; + struct altera_tse_private *priv; + const unsigned char *macaddr; + struct device_node *np = pdev->dev.of_node; + void __iomem *descmap; + const struct of_device_id *of_id = NULL; + + ndev = alloc_etherdev(sizeof(struct altera_tse_private)); + if (!ndev) { + dev_err(&pdev->dev, "Could not allocate network device\n"); + return -ENODEV; + } + + SET_NETDEV_DEV(ndev, &pdev->dev); + + priv = netdev_priv(ndev); + priv->device = &pdev->dev; + priv->dev = ndev; + priv->msg_enable = netif_msg_init(debug, default_msg_level); + + of_id = of_match_device(altera_tse_ids, &pdev->dev); + + if (of_id) + priv->dmaops = (struct altera_dmaops *)of_id->data; + + + if (priv->dmaops && + priv->dmaops->altera_dtype == ALTERA_DTYPE_SGDMA) { + /* Get the mapped address to the SGDMA descriptor memory */ + ret = request_and_map(pdev, "s1", &dma_res, &descmap); + if (ret) + goto out_free; + + /* Start of that memory is for transmit descriptors */ + priv->tx_dma_desc = descmap; + + /* First half is for tx descriptors, other half for rx */ + priv->txdescmem = resource_size(dma_res)/2; + + priv->txdescmem_busaddr = (dma_addr_t)dma_res->start; + + priv->rx_dma_desc = (void __iomem *)((uintptr_t)(descmap + + priv->txdescmem)); + priv->rxdescmem = resource_size(dma_res)/2; + priv->rxdescmem_busaddr = dma_res->start; + priv->rxdescmem_busaddr += priv->txdescmem; + + if (upper_32_bits(priv->rxdescmem_busaddr)) { + dev_dbg(priv->device, + "SGDMA bus addresses greater than 32-bits\n"); + goto out_free; + } + if (upper_32_bits(priv->txdescmem_busaddr)) { + dev_dbg(priv->device, + "SGDMA bus addresses greater than 32-bits\n"); + goto out_free; + } + } else if (priv->dmaops && + priv->dmaops->altera_dtype == ALTERA_DTYPE_MSGDMA) { + ret = request_and_map(pdev, "rx_resp", &dma_res, + &priv->rx_dma_resp); + if (ret) + goto out_free; + + ret = request_and_map(pdev, "tx_desc", &dma_res, + &priv->tx_dma_desc); + if (ret) + goto out_free; + + priv->txdescmem = resource_size(dma_res); + priv->txdescmem_busaddr = dma_res->start; + + ret = request_and_map(pdev, "rx_desc", &dma_res, + &priv->rx_dma_desc); + if (ret) + goto out_free; + + priv->rxdescmem = resource_size(dma_res); + priv->rxdescmem_busaddr = dma_res->start; + + } else { + goto out_free; + } + + if (!dma_set_mask(priv->device, DMA_BIT_MASK(priv->dmaops->dmamask))) + dma_set_coherent_mask(priv->device, + DMA_BIT_MASK(priv->dmaops->dmamask)); + else if
(!dma_set_mask(priv->device, DMA_BIT_MASK(32))) + dma_set_coherent_mask(priv->device, DMA_BIT_MASK(32)); + else + goto out_free; + + /* MAC address space */ + ret = request_and_map(pdev, "control_port", &control_port, + (void __iomem **)&priv->mac_dev); + if (ret) + goto out_free; + + /* xSGDMA Rx Dispatcher address space */ + ret = request_and_map(pdev, "rx_csr", &dma_res, + &priv->rx_dma_csr); + if (ret) + goto out_free; + + + /* xSGDMA Tx Dispatcher address space */ + ret = request_and_map(pdev, "tx_csr", &dma_res, + &priv->tx_dma_csr); + if (ret) + goto out_free; + + + /* Rx IRQ */ + priv->rx_irq = platform_get_irq_byname(pdev, "rx_irq"); + if (priv->rx_irq == -ENXIO) { + dev_err(&pdev->dev, "cannot obtain Rx IRQ\n"); + ret = -ENXIO; + goto out_free; + } + + /* Tx IRQ */ + priv->tx_irq = platform_get_irq_byname(pdev, "tx_irq"); + if (priv->tx_irq == -ENXIO) { + dev_err(&pdev->dev, "cannot obtain Tx IRQ\n"); + ret = -ENXIO; + goto out_free; + } + + /* get FIFO depths from device tree */ + if (of_property_read_u32(pdev->dev.of_node, "rx-fifo-depth", + &priv->rx_fifo_depth)) { + dev_err(&pdev->dev, "cannot obtain rx-fifo-depth\n"); + ret = -ENXIO; + goto out_free; + } + + if (of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth", + &priv->tx_fifo_depth)) { + dev_err(&pdev->dev, "cannot obtain tx-fifo-depth\n"); + ret = -ENXIO; + goto out_free; + } + + /* get hash filter settings for this instance */ + priv->hash_filter = + of_property_read_bool(pdev->dev.of_node, + "altr,has-hash-multicast-filter"); + + /* get supplemental address settings for this instance */ + priv->added_unicast = + of_property_read_bool(pdev->dev.of_node, + "altr,has-supplementary-unicast"); + + /* Max MTU is 1500, ETH_DATA_LEN */ + priv->max_mtu = ETH_DATA_LEN; + + /* Get the max mtu from the device tree. Note that the + * "max-frame-size" parameter is actually max mtu. Definition + * in the ePAPR v1.1 spec and usage differ, so go with usage.
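The probe code above mixes three device-tree idioms: required u32 properties whose absence fails the probe, optional ones preset with a default (of_property_read_u32() leaves the output untouched on failure, which is why max_mtu is preset to ETH_DATA_LEN first), and boolean flags tested only for presence. A compact sketch of the three, with struct my_cfg as an illustrative container:

#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/if_ether.h>

struct my_cfg {		/* illustrative */
	u32 rx_fifo_depth;
	u32 max_mtu;
	bool hash_filter;
};

static int my_read_dt(struct platform_device *pdev, struct my_cfg *cfg)
{
	struct device_node *np = pdev->dev.of_node;

	/* Required: fail the probe if absent */
	if (of_property_read_u32(np, "rx-fifo-depth", &cfg->rx_fifo_depth)) {
		dev_err(&pdev->dev, "missing rx-fifo-depth\n");
		return -ENXIO;
	}

	/* Optional with default: preset, then let a successful read
	 * overwrite it
	 */
	cfg->max_mtu = ETH_DATA_LEN;
	of_property_read_u32(np, "max-frame-size", &cfg->max_mtu);

	/* Boolean flag: presence of the property alone decides */
	cfg->hash_filter = of_property_read_bool(np,
					"altr,has-hash-multicast-filter");
	return 0;
}

The same probe also shows why request_and_map() uses the devm_* variants: resources acquired through them are released automatically on probe failure and on driver detach, so none of the out_free paths need to undo the ioremaps by hand.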
+ */ + of_property_read_u32(pdev->dev.of_node, "max-frame-size", + &priv->max_mtu); + + /* The DMA buffer size already accounts for an alignment bias + * to avoid unaligned access exceptions for the NIOS processor, + */ + priv->rx_dma_buf_sz = ALTERA_RXDMABUFFER_SIZE; + + /* get default MAC address from device tree */ + macaddr = of_get_mac_address(pdev->dev.of_node); + if (macaddr) + ether_addr_copy(ndev->dev_addr, macaddr); + else + eth_hw_addr_random(ndev); + + priv->phy_iface = of_get_phy_mode(np); + + /* try to get PHY address from device tree, use PHY autodetection if + * no valid address is given + */ + if (of_property_read_u32(pdev->dev.of_node, "phy-addr", + &priv->phy_addr)) { + priv->phy_addr = POLL_PHY; + } + + if (!((priv->phy_addr == POLL_PHY) || + ((priv->phy_addr >= 0) && (priv->phy_addr < PHY_MAX_ADDR)))) { + dev_err(&pdev->dev, "invalid phy-addr specified %d\n", + priv->phy_addr); + goto out_free; + } + + /* Create/attach to MDIO bus */ + ret = altera_tse_mdio_create(ndev, + atomic_add_return(1, &instance_count)); + + if (ret) + goto out_free; + + /* initialize netdev */ + ether_setup(ndev); + ndev->mem_start = control_port->start; + ndev->mem_end = control_port->end; + ndev->netdev_ops = &altera_tse_netdev_ops; + altera_tse_set_ethtool_ops(ndev); + + altera_tse_netdev_ops.ndo_set_rx_mode = tse_set_rx_mode; + + if (priv->hash_filter) + altera_tse_netdev_ops.ndo_set_rx_mode = + tse_set_rx_mode_hashfilter; + + /* Scatter/gather IO is not supported, + * so it is turned off + */ + ndev->hw_features &= ~NETIF_F_SG; + ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA; + + /* VLAN offloading of tagging, stripping and filtering is not + * supported by hardware, but driver will accommodate the + * extra 4-byte VLAN tag for processing by upper layers + */ + ndev->features |= NETIF_F_HW_VLAN_CTAG_RX; + + /* setup NAPI interface */ + netif_napi_add(ndev, &priv->napi, tse_poll, NAPI_POLL_WEIGHT); + + spin_lock_init(&priv->mac_cfg_lock); + spin_lock_init(&priv->tx_lock); + spin_lock_init(&priv->rxdma_irq_lock); + + ret = register_netdev(ndev); + if (ret) { + dev_err(&pdev->dev, "failed to register TSE net device\n"); + goto out_free_mdio; + } + + platform_set_drvdata(pdev, ndev); + + priv->revision = ioread32(&priv->mac_dev->megacore_revision); + + if (netif_msg_probe(priv)) + dev_info(&pdev->dev, "Altera TSE MAC version %d.%d at 0x%08lx irq %d/%d\n", + (priv->revision >> 8) & 0xff, + priv->revision & 0xff, + (unsigned long) control_port->start, priv->rx_irq, + priv->tx_irq); + + ret = init_phy(ndev); + if (ret != 0) { + netdev_err(ndev, "Cannot attach to PHY (error: %d)\n", ret); + goto out_free_mdio; + } + return 0; + +out_free_mdio: + altera_tse_mdio_destroy(ndev); +out_free: + free_netdev(ndev); + return ret; +} + +/* Remove Altera TSE MAC device + */ +static int altera_tse_remove(struct platform_device *pdev) +{ + struct net_device *ndev = platform_get_drvdata(pdev); + + platform_set_drvdata(pdev, NULL); + altera_tse_mdio_destroy(ndev); + unregister_netdev(ndev); + free_netdev(ndev); + + return 0; +} + +struct altera_dmaops altera_dtype_sgdma = { + .altera_dtype = ALTERA_DTYPE_SGDMA, + .dmamask = 32, + .reset_dma = sgdma_reset, + .enable_txirq = sgdma_enable_txirq, + .enable_rxirq = sgdma_enable_rxirq, + .disable_txirq = sgdma_disable_txirq, + .disable_rxirq = sgdma_disable_rxirq, + .clear_txirq = sgdma_clear_txirq, + .clear_rxirq = sgdma_clear_rxirq, + .tx_buffer = sgdma_tx_buffer, + .tx_completions = sgdma_tx_completions, + .add_rx_desc = sgdma_add_rx_desc, + .get_rx_status 
= sgdma_rx_status, + .init_dma = sgdma_initialize, + .uninit_dma = sgdma_uninitialize, +}; + +struct altera_dmaops altera_dtype_msgdma = { + .altera_dtype = ALTERA_DTYPE_MSGDMA, + .dmamask = 64, + .reset_dma = msgdma_reset, + .enable_txirq = msgdma_enable_txirq, + .enable_rxirq = msgdma_enable_rxirq, + .disable_txirq = msgdma_disable_txirq, + .disable_rxirq = msgdma_disable_rxirq, + .clear_txirq = msgdma_clear_txirq, + .clear_rxirq = msgdma_clear_rxirq, + .tx_buffer = msgdma_tx_buffer, + .tx_completions = msgdma_tx_completions, + .add_rx_desc = msgdma_add_rx_desc, + .get_rx_status = msgdma_rx_status, + .init_dma = msgdma_initialize, + .uninit_dma = msgdma_uninitialize, +}; + +static struct of_device_id altera_tse_ids[] = { + { .compatible = "altr,tse-msgdma-1.0", .data = &altera_dtype_msgdma, }, + { .compatible = "altr,tse-1.0", .data = &altera_dtype_sgdma, }, + { .compatible = "ALTR,tse-1.0", .data = &altera_dtype_sgdma, }, + {}, +}; +MODULE_DEVICE_TABLE(of, altera_tse_ids); + +static struct platform_driver altera_tse_driver = { + .probe = altera_tse_probe, + .remove = altera_tse_remove, + .suspend = NULL, + .resume = NULL, + .driver = { + .name = ALTERA_TSE_RESOURCE_NAME, + .owner = THIS_MODULE, + .of_match_table = altera_tse_ids, + }, +}; + +module_platform_driver(altera_tse_driver); + +MODULE_AUTHOR("Altera Corporation"); +MODULE_DESCRIPTION("Altera Triple Speed Ethernet MAC driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/ethernet/altera/altera_utils.c b/drivers/net/ethernet/altera/altera_utils.c new file mode 100644 index 000000000000..70fa13f486b2 --- /dev/null +++ b/drivers/net/ethernet/altera/altera_utils.c @@ -0,0 +1,44 @@ +/* Altera TSE SGDMA and MSGDMA Linux driver + * Copyright (C) 2014 Altera Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#include "altera_tse.h" +#include "altera_utils.h" + +void tse_set_bit(void __iomem *ioaddr, u32 bit_mask) +{ + u32 value = ioread32(ioaddr); + value |= bit_mask; + iowrite32(value, ioaddr); +} + +void tse_clear_bit(void __iomem *ioaddr, u32 bit_mask) +{ + u32 value = ioread32(ioaddr); + value &= ~bit_mask; + iowrite32(value, ioaddr); +} + +int tse_bit_is_set(void __iomem *ioaddr, u32 bit_mask) +{ + u32 value = ioread32(ioaddr); + return (value & bit_mask) ? 1 : 0; +} + +int tse_bit_is_clear(void __iomem *ioaddr, u32 bit_mask) +{ + u32 value = ioread32(ioaddr); + return (value & bit_mask) ? 0 : 1; +} diff --git a/drivers/net/ethernet/altera/altera_utils.h b/drivers/net/ethernet/altera/altera_utils.h new file mode 100644 index 000000000000..ce1db36d3583 --- /dev/null +++ b/drivers/net/ethernet/altera/altera_utils.h @@ -0,0 +1,27 @@ +/* Altera TSE SGDMA and MSGDMA Linux driver + * Copyright (C) 2014 Altera Corporation. 
All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#include <linux/kernel.h> + +#ifndef __ALTERA_UTILS_H__ +#define __ALTERA_UTILS_H__ + +void tse_set_bit(void __iomem *ioaddr, u32 bit_mask); +void tse_clear_bit(void __iomem *ioaddr, u32 bit_mask); +int tse_bit_is_set(void __iomem *ioaddr, u32 bit_mask); +int tse_bit_is_clear(void __iomem *ioaddr, u32 bit_mask); + +#endif /* __ALTERA_UTILS_H__*/ diff --git a/drivers/net/ethernet/amd/7990.c b/drivers/net/ethernet/amd/7990.c index 18e542f7853d..98a10d555b79 100644 --- a/drivers/net/ethernet/amd/7990.c +++ b/drivers/net/ethernet/amd/7990.c @@ -578,7 +578,7 @@ int lance_start_xmit(struct sk_buff *skb, struct net_device *dev) outs++; /* Kick the lance: transmit now */ WRITERDP(lp, LE_C0_INEA | LE_C0_TDMD); - dev_kfree_skb(skb); + dev_consume_skb_any(skb); spin_lock_irqsave(&lp->devlock, flags); if (TX_BUFFS_AVAIL) diff --git a/drivers/net/ethernet/amd/am79c961a.c b/drivers/net/ethernet/amd/am79c961a.c index 9793767996a2..87e727b921dc 100644 --- a/drivers/net/ethernet/amd/am79c961a.c +++ b/drivers/net/ethernet/amd/am79c961a.c @@ -472,7 +472,7 @@ am79c961_sendpacket(struct sk_buff *skb, struct net_device *dev) if (am_readword(dev, priv->txhdr + (priv->txhead << 3) + 2) & TMD_OWN) netif_stop_queue(dev); - dev_kfree_skb(skb); + dev_consume_skb_any(skb); return NETDEV_TX_OK; } diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c index 2061b471fd16..26efaaa5e73f 100644 --- a/drivers/net/ethernet/amd/amd8111e.c +++ b/drivers/net/ethernet/amd/amd8111e.c @@ -720,6 +720,9 @@ static int amd8111e_rx_poll(struct napi_struct *napi, int budget) int rx_pkt_limit = budget; unsigned long flags; + if (rx_pkt_limit <= 0) + goto rx_not_empty; + do{ /* process receive packets until we use the quota*/ /* If we own the next entry, it's a new packet. Send it up. 
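The 7990 and am79c961a hunks above (and several below) replace dev_kfree_skb() with dev_consume_skb_any() on successful-transmit paths. "Consume" marks the skb as delivered rather than dropped, so drop-monitoring tools stay quiet, and the _any variants pick the context-appropriate free themselves, which is also why atl1c_clean_buffer() in a later hunk can shed its in_irq argument. The convention in a sketch, where tx_ok is a hypothetical completion flag:

#include <linux/skbuff.h>

static void my_tx_complete(struct sk_buff *skb, bool tx_ok)
{
	if (tx_ok)
		dev_consume_skb_any(skb);	/* delivered: not a drop */
	else
		dev_kfree_skb_any(skb);		/* error path: a real drop */
}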
*/ diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c index 9339cccfe05a..e7cc9174e364 100644 --- a/drivers/net/ethernet/amd/pcnet32.c +++ b/drivers/net/ethernet/amd/pcnet32.c @@ -549,35 +549,35 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev, struct pcnet32_rx_head *new_rx_ring; struct sk_buff **new_skb_list; int new, overlap; + unsigned int entries = 1 << size; new_rx_ring = pci_alloc_consistent(lp->pci_dev, sizeof(struct pcnet32_rx_head) * - (1 << size), + entries, &new_ring_dma_addr); if (new_rx_ring == NULL) { netif_err(lp, drv, dev, "Consistent memory allocation failed\n"); return; } - memset(new_rx_ring, 0, sizeof(struct pcnet32_rx_head) * (1 << size)); + memset(new_rx_ring, 0, sizeof(struct pcnet32_rx_head) * entries); - new_dma_addr_list = kcalloc(1 << size, sizeof(dma_addr_t), GFP_ATOMIC); + new_dma_addr_list = kcalloc(entries, sizeof(dma_addr_t), GFP_ATOMIC); if (!new_dma_addr_list) goto free_new_rx_ring; - new_skb_list = kcalloc(1 << size, sizeof(struct sk_buff *), - GFP_ATOMIC); + new_skb_list = kcalloc(entries, sizeof(struct sk_buff *), GFP_ATOMIC); if (!new_skb_list) goto free_new_lists; /* first copy the current receive buffers */ - overlap = min(size, lp->rx_ring_size); + overlap = min(entries, lp->rx_ring_size); for (new = 0; new < overlap; new++) { new_rx_ring[new] = lp->rx_ring[new]; new_dma_addr_list[new] = lp->rx_dma_addr[new]; new_skb_list[new] = lp->rx_skbuff[new]; } /* now allocate any new buffers needed */ - for (; new < size; new++) { + for (; new < entries; new++) { struct sk_buff *rx_skbuff; new_skb_list[new] = netdev_alloc_skb(dev, PKT_BUF_SKB); rx_skbuff = new_skb_list[new]; @@ -592,6 +592,13 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev, new_dma_addr_list[new] = pci_map_single(lp->pci_dev, rx_skbuff->data, PKT_BUF_SIZE, PCI_DMA_FROMDEVICE); + if (pci_dma_mapping_error(lp->pci_dev, + new_dma_addr_list[new])) { + netif_err(lp, drv, dev, "%s dma mapping failed\n", + __func__); + dev_kfree_skb(new_skb_list[new]); + goto free_all_new; + } new_rx_ring[new].base = cpu_to_le32(new_dma_addr_list[new]); new_rx_ring[new].buf_length = cpu_to_le16(NEG_BUF_SIZE); new_rx_ring[new].status = cpu_to_le16(0x8000); @@ -599,8 +606,12 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev, /* and free any unneeded buffers */ for (; new < lp->rx_ring_size; new++) { if (lp->rx_skbuff[new]) { - pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[new], - PKT_BUF_SIZE, PCI_DMA_FROMDEVICE); + if (!pci_dma_mapping_error(lp->pci_dev, + lp->rx_dma_addr[new])) + pci_unmap_single(lp->pci_dev, + lp->rx_dma_addr[new], + PKT_BUF_SIZE, + PCI_DMA_FROMDEVICE); dev_kfree_skb(lp->rx_skbuff[new]); } } @@ -612,7 +623,7 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev, lp->rx_ring_size, lp->rx_ring, lp->rx_ring_dma_addr); - lp->rx_ring_size = (1 << size); + lp->rx_ring_size = entries; lp->rx_mod_mask = lp->rx_ring_size - 1; lp->rx_len_bits = (size << 4); lp->rx_ring = new_rx_ring; @@ -624,8 +635,12 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev, free_all_new: while (--new >= lp->rx_ring_size) { if (new_skb_list[new]) { - pci_unmap_single(lp->pci_dev, new_dma_addr_list[new], - PKT_BUF_SIZE, PCI_DMA_FROMDEVICE); + if (!pci_dma_mapping_error(lp->pci_dev, + new_dma_addr_list[new])) + pci_unmap_single(lp->pci_dev, + new_dma_addr_list[new], + PKT_BUF_SIZE, + PCI_DMA_FROMDEVICE); dev_kfree_skb(new_skb_list[new]); } } @@ -634,8 +649,7 @@ free_new_lists: kfree(new_dma_addr_list); free_new_rx_ring: 
pci_free_consistent(lp->pci_dev, - sizeof(struct pcnet32_rx_head) * - (1 << size), + sizeof(struct pcnet32_rx_head) * entries, new_rx_ring, new_ring_dma_addr); } @@ -650,8 +664,12 @@ static void pcnet32_purge_rx_ring(struct net_device *dev) lp->rx_ring[i].status = 0; /* CPU owns buffer */ wmb(); /* Make sure adapter sees owner change */ if (lp->rx_skbuff[i]) { - pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[i], - PKT_BUF_SIZE, PCI_DMA_FROMDEVICE); + if (!pci_dma_mapping_error(lp->pci_dev, + lp->rx_dma_addr[i])) + pci_unmap_single(lp->pci_dev, + lp->rx_dma_addr[i], + PKT_BUF_SIZE, + PCI_DMA_FROMDEVICE); dev_kfree_skb_any(lp->rx_skbuff[i]); } lp->rx_skbuff[i] = NULL; @@ -930,6 +948,12 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1) lp->tx_dma_addr[x] = pci_map_single(lp->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE); + if (pci_dma_mapping_error(lp->pci_dev, lp->tx_dma_addr[x])) { + netif_printk(lp, hw, KERN_DEBUG, dev, + "DMA mapping error at line: %d!\n", + __LINE__); + goto clean_up; + } lp->tx_ring[x].base = cpu_to_le32(lp->tx_dma_addr[x]); wmb(); /* Make sure owner changes after all others are visible */ lp->tx_ring[x].status = cpu_to_le16(status); @@ -1142,24 +1166,36 @@ static void pcnet32_rx_entry(struct net_device *dev, if (pkt_len > rx_copybreak) { struct sk_buff *newskb; + dma_addr_t new_dma_addr; newskb = netdev_alloc_skb(dev, PKT_BUF_SKB); + /* + * map the new buffer, if mapping fails, drop the packet and + * reuse the old buffer + */ if (newskb) { skb_reserve(newskb, NET_IP_ALIGN); - skb = lp->rx_skbuff[entry]; - pci_unmap_single(lp->pci_dev, - lp->rx_dma_addr[entry], - PKT_BUF_SIZE, - PCI_DMA_FROMDEVICE); - skb_put(skb, pkt_len); - lp->rx_skbuff[entry] = newskb; - lp->rx_dma_addr[entry] = - pci_map_single(lp->pci_dev, - newskb->data, - PKT_BUF_SIZE, - PCI_DMA_FROMDEVICE); - rxp->base = cpu_to_le32(lp->rx_dma_addr[entry]); - rx_in_place = 1; + new_dma_addr = pci_map_single(lp->pci_dev, + newskb->data, + PKT_BUF_SIZE, + PCI_DMA_FROMDEVICE); + if (pci_dma_mapping_error(lp->pci_dev, new_dma_addr)) { + netif_err(lp, rx_err, dev, + "DMA mapping error.\n"); + dev_kfree_skb(newskb); + skb = NULL; + } else { + skb = lp->rx_skbuff[entry]; + pci_unmap_single(lp->pci_dev, + lp->rx_dma_addr[entry], + PKT_BUF_SIZE, + PCI_DMA_FROMDEVICE); + skb_put(skb, pkt_len); + lp->rx_skbuff[entry] = newskb; + lp->rx_dma_addr[entry] = new_dma_addr; + rxp->base = cpu_to_le32(new_dma_addr); + rx_in_place = 1; + } } else skb = NULL; } else @@ -2229,9 +2265,12 @@ static void pcnet32_purge_tx_ring(struct net_device *dev) lp->tx_ring[i].status = 0; /* CPU owns buffer */ wmb(); /* Make sure adapter sees owner change */ if (lp->tx_skbuff[i]) { - pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[i], - lp->tx_skbuff[i]->len, - PCI_DMA_TODEVICE); + if (!pci_dma_mapping_error(lp->pci_dev, + lp->tx_dma_addr[i])) + pci_unmap_single(lp->pci_dev, + lp->tx_dma_addr[i], + lp->tx_skbuff[i]->len, + PCI_DMA_TODEVICE); dev_kfree_skb_any(lp->tx_skbuff[i]); } lp->tx_skbuff[i] = NULL; @@ -2264,10 +2303,19 @@ static int pcnet32_init_ring(struct net_device *dev) } rmb(); - if (lp->rx_dma_addr[i] == 0) + if (lp->rx_dma_addr[i] == 0) { lp->rx_dma_addr[i] = pci_map_single(lp->pci_dev, rx_skbuff->data, PKT_BUF_SIZE, PCI_DMA_FROMDEVICE); + if (pci_dma_mapping_error(lp->pci_dev, + lp->rx_dma_addr[i])) { + /* there is not much we can do at this point */ + netif_err(lp, drv, dev, + "%s pci dma mapping error\n", + __func__); + return -1; + } + } lp->rx_ring[i].base = cpu_to_le32(lp->rx_dma_addr[i]); 
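The pcnet32 changes above add pci_dma_mapping_error() checks after every pci_map_single() and guard each unmap the same way; in the rx-copybreak path the old buffer is deliberately kept when mapping the replacement fails, so a transient mapping failure costs one packet rather than a ring slot. The refill pattern in isolation, with illustrative names:

#include <linux/pci.h>
#include <linux/netdevice.h>

struct my_rx_slot {	/* illustrative */
	struct net_device *ndev;
	struct sk_buff *skb;
	dma_addr_t dma;
};

static int my_refill_rx(struct pci_dev *pdev, struct my_rx_slot *slot,
			unsigned int buf_len)
{
	struct sk_buff *skb = netdev_alloc_skb(slot->ndev, buf_len);
	dma_addr_t dma;

	if (!skb)
		return -ENOMEM;	/* keep the old buffer in place */

	dma = pci_map_single(pdev, skb->data, buf_len, PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(pdev, dma)) {
		dev_kfree_skb(skb);	/* again: old buffer stays valid */
		return -EIO;
	}

	/* Only now is it safe to retire the previous buffer */
	pci_unmap_single(pdev, slot->dma, buf_len, PCI_DMA_FROMDEVICE);
	slot->skb = skb;
	slot->dma = dma;
	return 0;
}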
lp->rx_ring[i].buf_length = cpu_to_le16(NEG_BUF_SIZE); wmb(); /* Make sure owner changes after all others are visible */ @@ -2397,9 +2445,14 @@ static netdev_tx_t pcnet32_start_xmit(struct sk_buff *skb, lp->tx_ring[entry].misc = 0x00000000; - lp->tx_skbuff[entry] = skb; lp->tx_dma_addr[entry] = pci_map_single(lp->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE); + if (pci_dma_mapping_error(lp->pci_dev, lp->tx_dma_addr[entry])) { + dev_kfree_skb_any(skb); + dev->stats.tx_dropped++; + goto drop_packet; + } + lp->tx_skbuff[entry] = skb; lp->tx_ring[entry].base = cpu_to_le32(lp->tx_dma_addr[entry]); wmb(); /* Make sure owner changes after all others are visible */ lp->tx_ring[entry].status = cpu_to_le16(status); @@ -2414,6 +2467,7 @@ static netdev_tx_t pcnet32_start_xmit(struct sk_buff *skb, lp->tx_full = 1; netif_stop_queue(dev); } +drop_packet: spin_unlock_irqrestore(&lp->lock, flags); return NETDEV_TX_OK; } diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c index 2e45f6ec1bf0..17bb9ce96260 100644 --- a/drivers/net/ethernet/atheros/alx/main.c +++ b/drivers/net/ethernet/atheros/alx/main.c @@ -535,7 +535,7 @@ static int alx_alloc_descriptors(struct alx_priv *alx) if (!alx->descmem.virt) goto out_free; - alx->txq.tpd = (void *)alx->descmem.virt; + alx->txq.tpd = alx->descmem.virt; alx->txq.tpd_dma = alx->descmem.dma; /* alignment requirement for next block */ @@ -1097,7 +1097,7 @@ static netdev_tx_t alx_start_xmit(struct sk_buff *skb, return NETDEV_TX_OK; drop: - dev_kfree_skb(skb); + dev_kfree_skb_any(skb); return NETDEV_TX_OK; } @@ -1248,19 +1248,13 @@ static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent) * shared register for the high 32 bits, so only a single, aligned, * 4 GB physical address range can be used for descriptors. 
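The alx hunk above folds the separate dma_set_mask()/dma_set_coherent_mask() pairs into dma_set_mask_and_coherent(). The resulting 64-bit-with-32-bit-fallback ladder, as a minimal sketch:

#include <linux/dma-mapping.h>

static int my_set_dma_masks(struct device *dev)
{
	if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
		return 0;		/* full 64-bit DMA */

	if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)))
		return 0;		/* fall back to 32-bit DMA */

	dev_err(dev, "No usable DMA configuration\n");
	return -EIO;
}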
*/ - if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) && - !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) { + if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) { dev_dbg(&pdev->dev, "DMA to 64-BIT addresses\n"); } else { - err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); if (err) { - err = dma_set_coherent_mask(&pdev->dev, - DMA_BIT_MASK(32)); - if (err) { - dev_err(&pdev->dev, - "No usable DMA config, aborting\n"); - goto out_pci_disable; - } + dev_err(&pdev->dev, "No usable DMA config, aborting\n"); + goto out_pci_disable; } } diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c index 4d3258dd0a88..e11bf18fbbd1 100644 --- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c +++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c @@ -832,7 +832,7 @@ static int atl1c_sw_init(struct atl1c_adapter *adapter) } static inline void atl1c_clean_buffer(struct pci_dev *pdev, - struct atl1c_buffer *buffer_info, int in_irq) + struct atl1c_buffer *buffer_info) { u16 pci_driection; if (buffer_info->flags & ATL1C_BUFFER_FREE) @@ -850,12 +850,8 @@ static inline void atl1c_clean_buffer(struct pci_dev *pdev, pci_unmap_page(pdev, buffer_info->dma, buffer_info->length, pci_driection); } - if (buffer_info->skb) { - if (in_irq) - dev_kfree_skb_irq(buffer_info->skb); - else - dev_kfree_skb(buffer_info->skb); - } + if (buffer_info->skb) + dev_consume_skb_any(buffer_info->skb); buffer_info->dma = 0; buffer_info->skb = NULL; ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_FREE); @@ -875,7 +871,7 @@ static void atl1c_clean_tx_ring(struct atl1c_adapter *adapter, ring_count = tpd_ring->count; for (index = 0; index < ring_count; index++) { buffer_info = &tpd_ring->buffer_info[index]; - atl1c_clean_buffer(pdev, buffer_info, 0); + atl1c_clean_buffer(pdev, buffer_info); } /* Zero out Tx-buffers */ @@ -899,7 +895,7 @@ static void atl1c_clean_rx_ring(struct atl1c_adapter *adapter) for (j = 0; j < rfd_ring->count; j++) { buffer_info = &rfd_ring->buffer_info[j]; - atl1c_clean_buffer(pdev, buffer_info, 0); + atl1c_clean_buffer(pdev, buffer_info); } /* zero out the descriptor ring */ memset(rfd_ring->desc, 0, rfd_ring->size); @@ -1562,7 +1558,7 @@ static bool atl1c_clean_tx_irq(struct atl1c_adapter *adapter, while (next_to_clean != hw_next_to_clean) { buffer_info = &tpd_ring->buffer_info[next_to_clean]; - atl1c_clean_buffer(pdev, buffer_info, 1); + atl1c_clean_buffer(pdev, buffer_info); if (++next_to_clean == tpd_ring->count) next_to_clean = 0; atomic_set(&tpd_ring->next_to_clean, next_to_clean); @@ -1977,17 +1973,17 @@ static int atl1c_tso_csum(struct atl1c_adapter *adapter, enum atl1c_trans_queue type) { struct pci_dev *pdev = adapter->pdev; + unsigned short offload_type; u8 hdr_len; u32 real_len; - unsigned short offload_type; - int err; if (skb_is_gso(skb)) { - if (skb_header_cloned(skb)) { - err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); - if (unlikely(err)) - return -1; - } + int err; + + err = skb_cow_head(skb, 0); + if (err < 0) + return err; + offload_type = skb_shinfo(skb)->gso_type; if (offload_type & SKB_GSO_TCPV4) { @@ -2085,7 +2081,7 @@ static void atl1c_tx_rollback(struct atl1c_adapter *adpt, while (index != tpd_ring->next_to_use) { tpd = ATL1C_TPD_DESC(tpd_ring, index); buffer_info = &tpd_ring->buffer_info[index]; - atl1c_clean_buffer(adpt->pdev, buffer_info, 0); + atl1c_clean_buffer(adpt->pdev, buffer_info); memset(tpd, 0, sizeof(struct atl1c_tpd_desc)); if (++index == 
tpd_ring->count) index = 0; @@ -2258,7 +2254,7 @@ static netdev_tx_t atl1c_xmit_frame(struct sk_buff *skb, /* roll back tpd/buffer */ atl1c_tx_rollback(adapter, tpd, type); spin_unlock_irqrestore(&adapter->tx_lock, flags); - dev_kfree_skb(skb); + dev_kfree_skb_any(skb); } else { atl1c_tx_queue(adapter, skb, tpd, type); spin_unlock_irqrestore(&adapter->tx_lock, flags); diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c index d5c2d3e912e5..4345332533ad 100644 --- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c +++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c @@ -1641,17 +1641,17 @@ static u16 atl1e_cal_tdp_req(const struct sk_buff *skb) static int atl1e_tso_csum(struct atl1e_adapter *adapter, struct sk_buff *skb, struct atl1e_tpd_desc *tpd) { + unsigned short offload_type; u8 hdr_len; u32 real_len; - unsigned short offload_type; - int err; if (skb_is_gso(skb)) { - if (skb_header_cloned(skb)) { - err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); - if (unlikely(err)) - return -1; - } + int err; + + err = skb_cow_head(skb, 0); + if (err < 0) + return err; + offload_type = skb_shinfo(skb)->gso_type; if (offload_type & SKB_GSO_TCPV4) { @@ -2436,7 +2436,7 @@ err_reset: err_register: err_sw_init: err_eeprom: - iounmap(adapter->hw.hw_addr); + pci_iounmap(pdev, adapter->hw.hw_addr); err_init_netdev: err_ioremap: free_netdev(netdev); @@ -2474,7 +2474,7 @@ static void atl1e_remove(struct pci_dev *pdev) unregister_netdev(netdev); atl1e_free_ring_resources(adapter); atl1e_force_ps(&adapter->hw); - iounmap(adapter->hw.hw_addr); + pci_iounmap(pdev, adapter->hw.hw_addr); pci_release_regions(pdev); free_netdev(netdev); pci_disable_device(pdev); diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c index 287272dd69da..dfd0e91fa726 100644 --- a/drivers/net/ethernet/atheros/atlx/atl1.c +++ b/drivers/net/ethernet/atheros/atlx/atl1.c @@ -2118,18 +2118,17 @@ static u16 atl1_tpd_avail(struct atl1_tpd_ring *tpd_ring) } static int atl1_tso(struct atl1_adapter *adapter, struct sk_buff *skb, - struct tx_packet_desc *ptpd) + struct tx_packet_desc *ptpd) { u8 hdr_len, ip_off; u32 real_len; - int err; if (skb_shinfo(skb)->gso_size) { - if (skb_header_cloned(skb)) { - err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); - if (unlikely(err)) - return -1; - } + int err; + + err = skb_cow_head(skb, 0); + if (err < 0) + return err; if (skb->protocol == htons(ETH_P_IP)) { struct iphdr *iph = ip_hdr(skb); @@ -2175,7 +2174,7 @@ static int atl1_tso(struct atl1_adapter *adapter, struct sk_buff *skb, return 3; } } - return false; + return 0; } static int atl1_tx_csum(struct atl1_adapter *adapter, struct sk_buff *skb, diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c index 265ce1b752ed..78befb522a52 100644 --- a/drivers/net/ethernet/atheros/atlx/atl2.c +++ b/drivers/net/ethernet/atheros/atlx/atl2.c @@ -55,6 +55,7 @@ static const char atl2_driver_name[] = "atl2"; static const char atl2_driver_string[] = "Atheros(R) L2 Ethernet Driver"; static const char atl2_copyright[] = "Copyright (c) 2007 Atheros Corporation."; static const char atl2_driver_version[] = ATL2_DRV_VERSION; +static const struct ethtool_ops atl2_ethtool_ops; MODULE_AUTHOR("Atheros Corporation <xiong.huang@atheros.com>, Chris Snook <csnook@redhat.com>"); MODULE_DESCRIPTION("Atheros Fast Ethernet Network Driver"); @@ -71,8 +72,6 @@ static DEFINE_PCI_DEVICE_TABLE(atl2_pci_tbl) = { }; MODULE_DEVICE_TABLE(pci, atl2_pci_tbl); 
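The atl1c, atl1e and atl1 TSO paths above all replace the skb_header_cloned() check followed by pskb_expand_head() with skb_cow_head(), a single call that guarantees a private, writable header before the driver edits it in place for segmentation. A sketch of the resulting call-site shape, with the checksum bookkeeping elided:

#include <linux/skbuff.h>
#include <linux/ip.h>

static int my_tso_prep(struct sk_buff *skb)
{
	int err;

	if (!skb_is_gso(skb))
		return 0;

	/* Header must be private and writable before we modify it */
	err = skb_cow_head(skb, 0);
	if (err < 0)
		return err;

	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *iph = ip_hdr(skb);

		iph->check = 0;	/* hardware recomputes it per segment */
		/* ... pseudo-header checksum, gso_size setup, ... */
	}
	return 0;
}

The atl1 hunk also fixes the no-offload return value from false to 0, matching the function's int return type and its documented error convention.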
-static void atl2_set_ethtool_ops(struct net_device *netdev); - static void atl2_check_options(struct atl2_adapter *adapter); /** @@ -1397,7 +1396,7 @@ static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent) atl2_setup_pcicmd(pdev); netdev->netdev_ops = &atl2_netdev_ops; - atl2_set_ethtool_ops(netdev); + SET_ETHTOOL_OPS(netdev, &atl2_ethtool_ops); netdev->watchdog_timeo = 5 * HZ; strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); @@ -2105,11 +2104,6 @@ static const struct ethtool_ops atl2_ethtool_ops = { .set_eeprom = atl2_set_eeprom, }; -static void atl2_set_ethtool_ops(struct net_device *netdev) -{ - SET_ETHTOOL_OPS(netdev, &atl2_ethtool_ops); -} - #define LBYTESWAP(a) ((((a) & 0x00ff00ff) << 8) | \ (((a) & 0xff00ff00) >> 8)) #define LONGSWAP(a) ((LBYTESWAP(a) << 16) | (LBYTESWAP(a) >> 16)) diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig index 3f97d9fd0a71..85dbddd03722 100644 --- a/drivers/net/ethernet/broadcom/Kconfig +++ b/drivers/net/ethernet/broadcom/Kconfig @@ -60,6 +60,17 @@ config BCM63XX_ENET This driver supports the ethernet MACs in the Broadcom 63xx MIPS chipset family (BCM63XX). +config BCMGENET + tristate "Broadcom GENET internal MAC support" + depends on OF + select MII + select PHYLIB + select FIXED_PHY if BCMGENET=y + select BCM7XXX_PHY + help + This driver supports the built-in Ethernet MACs found in the + Broadcom BCM7xxx Set Top Box family chipset. + config BNX2 tristate "Broadcom NetXtremeII support" depends on PCI diff --git a/drivers/net/ethernet/broadcom/Makefile b/drivers/net/ethernet/broadcom/Makefile index 68efa1a3fb88..fd639a0d4c7d 100644 --- a/drivers/net/ethernet/broadcom/Makefile +++ b/drivers/net/ethernet/broadcom/Makefile @@ -4,6 +4,7 @@ obj-$(CONFIG_B44) += b44.o obj-$(CONFIG_BCM63XX_ENET) += bcm63xx_enet.o +obj-$(CONFIG_BCMGENET) += genet/ obj-$(CONFIG_BNX2) += bnx2.o obj-$(CONFIG_CNIC) += cnic.o obj-$(CONFIG_BNX2X) += bnx2x/ diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c index 8a7bf7dad898..05ba62589017 100644 --- a/drivers/net/ethernet/broadcom/b44.c +++ b/drivers/net/ethernet/broadcom/b44.c @@ -1685,7 +1685,7 @@ static struct rtnl_link_stats64 *b44_get_stats64(struct net_device *dev, unsigned int start; do { - start = u64_stats_fetch_begin_bh(&hwstat->syncp); + start = u64_stats_fetch_begin_irq(&hwstat->syncp); /* Convert HW stats into rtnl_link_stats64 stats. 
*/ nstat->rx_packets = hwstat->rx_pkts; @@ -1719,7 +1719,7 @@ static struct rtnl_link_stats64 *b44_get_stats64(struct net_device *dev, /* Carrier lost counter seems to be broken for some devices */ nstat->tx_carrier_errors = hwstat->tx_carrier_lost; #endif - } while (u64_stats_fetch_retry_bh(&hwstat->syncp, start)); + } while (u64_stats_fetch_retry_irq(&hwstat->syncp, start)); return nstat; } @@ -2073,12 +2073,12 @@ static void b44_get_ethtool_stats(struct net_device *dev, do { data_src = &hwstat->tx_good_octets; data_dst = data; - start = u64_stats_fetch_begin_bh(&hwstat->syncp); + start = u64_stats_fetch_begin_irq(&hwstat->syncp); for (i = 0; i < ARRAY_SIZE(b44_gstrings); i++) *data_dst++ = *data_src++; - } while (u64_stats_fetch_retry_bh(&hwstat->syncp, start)); + } while (u64_stats_fetch_retry_irq(&hwstat->syncp, start)); } static void b44_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c index b9a5fb6400d3..a7d11f5565d6 100644 --- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c +++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c @@ -1722,9 +1722,6 @@ static const struct net_device_ops bcm_enet_ops = { .ndo_set_rx_mode = bcm_enet_set_multicast_list, .ndo_do_ioctl = bcm_enet_ioctl, .ndo_change_mtu = bcm_enet_change_mtu, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = bcm_enet_netpoll, -#endif }; /* diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c index cda25ac45b47..a8efb18e42fa 100644 --- a/drivers/net/ethernet/broadcom/bnx2.c +++ b/drivers/net/ethernet/broadcom/bnx2.c @@ -2507,6 +2507,7 @@ bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent) bp->fw_wr_seq++; msg_data |= bp->fw_wr_seq; + bp->fw_last_msg = msg_data; bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data); @@ -2885,7 +2886,7 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget) sw_cons = BNX2_NEXT_TX_BD(sw_cons); tx_bytes += skb->len; - dev_kfree_skb(skb); + dev_kfree_skb_any(skb); tx_pkt++; if (tx_pkt == budget) break; @@ -3132,6 +3133,9 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget) struct l2_fhdr *rx_hdr; int rx_pkt = 0, pg_ring_used = 0; + if (budget <= 0) + return rx_pkt; + hw_cons = bnx2_get_hw_rx_cons(bnapi); sw_cons = rxr->rx_cons; sw_prod = rxr->rx_prod; @@ -4000,8 +4004,23 @@ bnx2_setup_wol(struct bnx2 *bp) wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL; } - if (!(bp->flags & BNX2_FLAG_NO_WOL)) - bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 1, 0); + if (!(bp->flags & BNX2_FLAG_NO_WOL)) { + u32 val; + + wol_msg |= BNX2_DRV_MSG_DATA_WAIT3; + if (bp->fw_last_msg || BNX2_CHIP(bp) != BNX2_CHIP_5709) { + bnx2_fw_sync(bp, wol_msg, 1, 0); + return; + } + /* Tell firmware not to power down the PHY yet, otherwise + * the chip will take a long time to respond to MMIO reads. + */ + val = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE); + bnx2_shmem_wr(bp, BNX2_PORT_FEATURE, + val | BNX2_PORT_FEATURE_ASF_ENABLED); + bnx2_fw_sync(bp, wol_msg, 1, 0); + bnx2_shmem_wr(bp, BNX2_PORT_FEATURE, val); + } } @@ -4033,9 +4052,22 @@ bnx2_set_power_state(struct bnx2 *bp, pci_power_t state) if (bp->wol) pci_set_power_state(bp->pdev, PCI_D3hot); - } else { - pci_set_power_state(bp->pdev, PCI_D3hot); + break; + } + if (!bp->fw_last_msg && BNX2_CHIP(bp) == BNX2_CHIP_5709) { + u32 val; + + /* Tell firmware not to power down the PHY yet, + * otherwise the other port may not respond to + * MMIO reads. 
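The b44 hunks above and below switch the stats readers from the _bh to the _irq variants of the u64_stats fetch/retry helpers; the retry-loop shape itself is unchanged and worth seeing in isolation. A sketch assuming b44's layout, where struct b44_hw_stats carries a syncp alongside its u64 counters:

#include <linux/u64_stats_sync.h>

static u64 my_read_rx_packets(struct b44_hw_stats *hwstat)
{
	unsigned int start;
	u64 rx;

	do {
		start = u64_stats_fetch_begin_irq(&hwstat->syncp);
		rx = hwstat->rx_pkts;	/* snapshot inside the seq window */
	} while (u64_stats_fetch_retry_irq(&hwstat->syncp, start));

	return rx;
}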
+ */ + val = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION); + val &= ~BNX2_CONDITION_PM_STATE_MASK; + val |= BNX2_CONDITION_PM_STATE_UNPREP; + bnx2_shmem_wr(bp, BNX2_BC_STATE_CONDITION, val); + } + pci_set_power_state(bp->pdev, PCI_D3hot); /* No more memory access after this point until * device is brought back to D0. @@ -6206,7 +6238,7 @@ bnx2_free_irq(struct bnx2 *bp) static void bnx2_enable_msix(struct bnx2 *bp, int msix_vecs) { - int i, total_vecs, rc; + int i, total_vecs; struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC]; struct net_device *dev = bp->dev; const int len = sizeof(bp->irq_tbl[0].name); @@ -6229,16 +6261,9 @@ bnx2_enable_msix(struct bnx2 *bp, int msix_vecs) #ifdef BCM_CNIC total_vecs++; #endif - rc = -ENOSPC; - while (total_vecs >= BNX2_MIN_MSIX_VEC) { - rc = pci_enable_msix(bp->pdev, msix_ent, total_vecs); - if (rc <= 0) - break; - if (rc > 0) - total_vecs = rc; - } - - if (rc != 0) + total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, + BNX2_MIN_MSIX_VEC, total_vecs); + if (total_vecs < 0) return; msix_vecs = total_vecs; @@ -6611,7 +6636,7 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev) mapping = dma_map_single(&bp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE); if (dma_mapping_error(&bp->pdev->dev, mapping)) { - dev_kfree_skb(skb); + dev_kfree_skb_any(skb); return NETDEV_TX_OK; } @@ -6704,7 +6729,7 @@ dma_error: PCI_DMA_TODEVICE); } - dev_kfree_skb(skb); + dev_kfree_skb_any(skb); return NETDEV_TX_OK; } diff --git a/drivers/net/ethernet/broadcom/bnx2.h b/drivers/net/ethernet/broadcom/bnx2.h index f1cf2c44e7ed..e341bc366fa5 100644 --- a/drivers/net/ethernet/broadcom/bnx2.h +++ b/drivers/net/ethernet/broadcom/bnx2.h @@ -6900,6 +6900,7 @@ struct bnx2 { u16 fw_wr_seq; u16 fw_drv_pulse_wr_seq; + u32 fw_last_msg; int rx_max_ring; int rx_ring_size; @@ -7406,6 +7407,10 @@ struct bnx2_rv2p_fw_file { #define BNX2_CONDITION_MFW_RUN_NCSI 0x00006000 #define BNX2_CONDITION_MFW_RUN_NONE 0x0000e000 #define BNX2_CONDITION_MFW_RUN_MASK 0x0000e000 +#define BNX2_CONDITION_PM_STATE_MASK 0x00030000 +#define BNX2_CONDITION_PM_STATE_FULL 0x00030000 +#define BNX2_CONDITION_PM_STATE_PREP 0x00020000 +#define BNX2_CONDITION_PM_STATE_UNPREP 0x00010000 #define BNX2_BC_STATE_DEBUG_CMD 0x1dc #define BNX2_BC_STATE_BC_DBG_CMD_SIGNATURE 0x42440000 diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index 391f29ef6d2e..4d8f8aba0ea5 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h @@ -26,8 +26,8 @@ * (you will need to reboot afterwards) */ /* #define BNX2X_STOP_ON_ERROR */ -#define DRV_MODULE_VERSION "1.78.17-0" -#define DRV_MODULE_RELDATE "2013/04/11" +#define DRV_MODULE_VERSION "1.78.19-0" +#define DRV_MODULE_RELDATE "2014/02/10" #define BNX2X_BC_VER 0x040200 #if defined(CONFIG_DCB) @@ -75,13 +75,22 @@ enum bnx2x_int_mode { #define BNX2X_MSG_DCB 0x8000000 /* regular debug print */ +#define DP_INNER(fmt, ...) \ + pr_notice("[%s:%d(%s)]" fmt, \ + __func__, __LINE__, \ + bp->dev ? (bp->dev->name) : "?", \ + ##__VA_ARGS__); + #define DP(__mask, fmt, ...) \ do { \ if (unlikely(bp->msg_enable & (__mask))) \ - pr_notice("[%s:%d(%s)]" fmt, \ - __func__, __LINE__, \ - bp->dev ? (bp->dev->name) : "?", \ - ##__VA_ARGS__); \ + DP_INNER(fmt, ##__VA_ARGS__); \ +} while (0) + +#define DP_AND(__mask, fmt, ...) \ +do { \ + if (unlikely((bp->msg_enable & (__mask)) == __mask)) \ + DP_INNER(fmt, ##__VA_ARGS__); \ } while (0) #define DP_CONT(__mask, fmt, ...) 
\ @@ -1146,10 +1155,6 @@ struct bnx2x_port { (offsetof(struct bnx2x_eth_stats, stat_name) / 4) /* slow path */ - -/* slow path work-queue */ -extern struct workqueue_struct *bnx2x_wq; - #define BNX2X_MAX_NUM_OF_VFS 64 #define BNX2X_VF_CID_WND 4 /* log num of queues per VF. HW config. */ #define BNX2X_CIDS_PER_VF (1 << BNX2X_VF_CID_WND) @@ -1261,6 +1266,7 @@ struct bnx2x_slowpath { union { struct client_init_ramrod_data init_data; struct client_update_ramrod_data update_data; + struct tpa_update_ramrod_data tpa_data; } q_rdata; union { @@ -1392,7 +1398,7 @@ struct bnx2x_fw_stats_data { }; /* Public slow path states */ -enum { +enum sp_rtnl_flag { BNX2X_SP_RTNL_SETUP_TC, BNX2X_SP_RTNL_TX_TIMEOUT, BNX2X_SP_RTNL_FAN_FAILURE, @@ -1403,6 +1409,12 @@ enum { BNX2X_SP_RTNL_RX_MODE, BNX2X_SP_RTNL_HYPERVISOR_VLAN, BNX2X_SP_RTNL_TX_STOP, + BNX2X_SP_RTNL_GET_DRV_VERSION, +}; + +enum bnx2x_iov_flag { + BNX2X_IOV_HANDLE_VF_MSG, + BNX2X_IOV_HANDLE_FLR, }; struct bnx2x_prev_path_list { @@ -1603,6 +1615,8 @@ struct bnx2x { int mrrs; struct delayed_work sp_task; + struct delayed_work iov_task; + atomic_t interrupt_occurred; struct delayed_work sp_rtnl_task; @@ -1693,6 +1707,10 @@ struct bnx2x { struct bnx2x_slowpath *slowpath; dma_addr_t slowpath_mapping; + /* Mechanism protecting the drv_info_to_mcp */ + struct mutex drv_info_mutex; + bool drv_info_mng_owner; + /* Total number of FW statistics requests */ u8 fw_stats_num; @@ -1882,6 +1900,9 @@ struct bnx2x { /* operation indication for the sp_rtnl task */ unsigned long sp_rtnl_state; + /* Indication of the IOV tasks */ + unsigned long iov_task_state; + /* DCBX Negotiation results */ struct dcbx_features dcbx_local_feat; u32 dcbx_error; @@ -2525,6 +2546,8 @@ enum { void bnx2x_set_local_cmng(struct bnx2x *bp); +void bnx2x_update_mng_version(struct bnx2x *bp); + #define MCPR_SCRATCH_BASE(bp) \ (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH) diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index dbcff509dc3f..9261d5313b5b 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -61,10 +61,14 @@ static void bnx2x_add_all_napi(struct bnx2x *bp) static int bnx2x_calc_num_queues(struct bnx2x *bp) { - return bnx2x_num_queues ? - min_t(int, bnx2x_num_queues, BNX2X_MAX_QUEUES(bp)) : - min_t(int, netif_get_num_default_rss_queues(), - BNX2X_MAX_QUEUES(bp)); + int nq = bnx2x_num_queues ? : netif_get_num_default_rss_queues(); + + /* Reduce memory usage in kdump environment by using only one queue */ + if (reset_devices) + nq = 1; + + nq = clamp(nq, 1, BNX2X_MAX_QUEUES(bp)); + return nq; } /** @@ -868,6 +872,8 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) if (unlikely(bp->panic)) return 0; #endif + if (budget <= 0) + return rx_pkt; bd_cons = fp->rx_bd_cons; bd_prod = fp->rx_bd_prod; @@ -1638,36 +1644,16 @@ int bnx2x_enable_msix(struct bnx2x *bp) DP(BNX2X_MSG_SP, "about to request enable msix with %d vectors\n", msix_vec); - rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], msix_vec); - + rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0], + BNX2X_MIN_MSIX_VEC_CNT(bp), msix_vec); /* * reconfigure number of tx/rx queues according to available * MSI-X vectors */ - if (rc >= BNX2X_MIN_MSIX_VEC_CNT(bp)) { - /* how less vectors we will have? 
*/ - int diff = msix_vec - rc; - - BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc); - - rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc); - - if (rc) { - BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc); - goto no_msix; - } - /* - * decrease number of queues by number of unallocated entries - */ - bp->num_ethernet_queues -= diff; - bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues; - - BNX2X_DEV_INFO("New queue configuration set: %d\n", - bp->num_queues); - } else if (rc > 0) { + if (rc == -ENOSPC) { /* Get by with single vector */ - rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], 1); - if (rc) { + rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0], 1, 1); + if (rc < 0) { BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n", rc); goto no_msix; @@ -1680,8 +1666,22 @@ int bnx2x_enable_msix(struct bnx2x *bp) bp->num_ethernet_queues = 1; bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues; } else if (rc < 0) { - BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc); + BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc); goto no_msix; + } else if (rc < msix_vec) { + /* how many fewer vectors will we have? */ + int diff = msix_vec - rc; + + BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc); + + /* + * decrease number of queues by number of unallocated entries + */ + bp->num_ethernet_queues -= diff; + bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues; + + BNX2X_DEV_INFO("New queue configuration set: %d\n", + bp->num_queues); } bp->flags |= USING_MSIX_FLAG; @@ -2234,8 +2234,10 @@ static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp) sizeof(struct per_queue_stats) * num_queue_stats + sizeof(struct stats_counter); - BNX2X_PCI_ALLOC(bp->fw_stats, &bp->fw_stats_mapping, - bp->fw_stats_data_sz + bp->fw_stats_req_sz); + bp->fw_stats = BNX2X_PCI_ALLOC(&bp->fw_stats_mapping, + bp->fw_stats_data_sz + bp->fw_stats_req_sz); + if (!bp->fw_stats) + goto alloc_mem_err; /* Set shortcuts */ bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats; @@ -2802,6 +2804,9 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) if (CNIC_ENABLED(bp)) bnx2x_load_cnic(bp); + if (IS_PF(bp)) + bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0); + if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) { /* mark driver is loaded in shmem2 */ u32 val; @@ -3028,6 +3033,10 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link) bp->state = BNX2X_STATE_CLOSED; bp->cnic_loaded = false; + /* Clear driver version indication in shmem */ + if (IS_PF(bp)) + bnx2x_update_mng_version(bp); + /* Check if there are pending parity attentions. If there are - set * RECOVERY_IN_PROGRESS.
*/ @@ -4370,14 +4379,17 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index) if (!IS_FCOE_IDX(index)) { /* status blocks */ - if (!CHIP_IS_E1x(bp)) - BNX2X_PCI_ALLOC(sb->e2_sb, - &bnx2x_fp(bp, index, status_blk_mapping), - sizeof(struct host_hc_status_block_e2)); - else - BNX2X_PCI_ALLOC(sb->e1x_sb, - &bnx2x_fp(bp, index, status_blk_mapping), - sizeof(struct host_hc_status_block_e1x)); + if (!CHIP_IS_E1x(bp)) { + sb->e2_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping), + sizeof(struct host_hc_status_block_e2)); + if (!sb->e2_sb) + goto alloc_mem_err; + } else { + sb->e1x_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping), + sizeof(struct host_hc_status_block_e1x)); + if (!sb->e1x_sb) + goto alloc_mem_err; + } } /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to @@ -4396,35 +4408,49 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index) "allocating tx memory of fp %d cos %d\n", index, cos); - BNX2X_ALLOC(txdata->tx_buf_ring, - sizeof(struct sw_tx_bd) * NUM_TX_BD); - BNX2X_PCI_ALLOC(txdata->tx_desc_ring, - &txdata->tx_desc_mapping, - sizeof(union eth_tx_bd_types) * NUM_TX_BD); + txdata->tx_buf_ring = kcalloc(NUM_TX_BD, + sizeof(struct sw_tx_bd), + GFP_KERNEL); + if (!txdata->tx_buf_ring) + goto alloc_mem_err; + txdata->tx_desc_ring = BNX2X_PCI_ALLOC(&txdata->tx_desc_mapping, + sizeof(union eth_tx_bd_types) * NUM_TX_BD); + if (!txdata->tx_desc_ring) + goto alloc_mem_err; } } /* Rx */ if (!skip_rx_queue(bp, index)) { /* fastpath rx rings: rx_buf rx_desc rx_comp */ - BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring), - sizeof(struct sw_rx_bd) * NUM_RX_BD); - BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring), - &bnx2x_fp(bp, index, rx_desc_mapping), - sizeof(struct eth_rx_bd) * NUM_RX_BD); + bnx2x_fp(bp, index, rx_buf_ring) = + kcalloc(NUM_RX_BD, sizeof(struct sw_rx_bd), GFP_KERNEL); + if (!bnx2x_fp(bp, index, rx_buf_ring)) + goto alloc_mem_err; + bnx2x_fp(bp, index, rx_desc_ring) = + BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_desc_mapping), + sizeof(struct eth_rx_bd) * NUM_RX_BD); + if (!bnx2x_fp(bp, index, rx_desc_ring)) + goto alloc_mem_err; /* Seed all CQEs by 1s */ - BNX2X_PCI_FALLOC(bnx2x_fp(bp, index, rx_comp_ring), - &bnx2x_fp(bp, index, rx_comp_mapping), - sizeof(struct eth_fast_path_rx_cqe) * - NUM_RCQ_BD); + bnx2x_fp(bp, index, rx_comp_ring) = + BNX2X_PCI_FALLOC(&bnx2x_fp(bp, index, rx_comp_mapping), + sizeof(struct eth_fast_path_rx_cqe) * NUM_RCQ_BD); + if (!bnx2x_fp(bp, index, rx_comp_ring)) + goto alloc_mem_err; /* SGE ring */ - BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring), - sizeof(struct sw_rx_page) * NUM_RX_SGE); - BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring), - &bnx2x_fp(bp, index, rx_sge_mapping), - BCM_PAGE_SIZE * NUM_RX_SGE_PAGES); + bnx2x_fp(bp, index, rx_page_ring) = + kcalloc(NUM_RX_SGE, sizeof(struct sw_rx_page), + GFP_KERNEL); + if (!bnx2x_fp(bp, index, rx_page_ring)) + goto alloc_mem_err; + bnx2x_fp(bp, index, rx_sge_ring) = + BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_sge_mapping), + BCM_PAGE_SIZE * NUM_RX_SGE_PAGES); + if (!bnx2x_fp(bp, index, rx_sge_ring)) + goto alloc_mem_err; /* RX BD ring */ bnx2x_set_next_page_rx_bd(fp); @@ -4780,12 +4806,8 @@ void bnx2x_tx_timeout(struct net_device *dev) bnx2x_panic(); #endif - smp_mb__before_clear_bit(); - set_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state); - smp_mb__after_clear_bit(); - /* This allows the netif to be shutdown gracefully before resetting */ - schedule_delayed_work(&bp->sp_rtnl_task, 0); + bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_TX_TIMEOUT, 0); } 
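The MSI-X conversions in the bnx2 and bnx2x hunks above replace the driver-side pci_enable_msix() retry loops with a single pci_enable_msix_range() call, which either allocates between a requested minimum and maximum number of vectors or fails with a negative errno (-ENOSPC when even the minimum cannot be satisfied). A minimal, driver-neutral sketch of the new pattern; the function name and parameters are illustrative, not taken from this patch:

#include <linux/pci.h>

/* 'ent' must have its .entry fields pre-filled by the caller. */
static int sketch_enable_msix(struct pci_dev *pdev, struct msix_entry *ent,
			      int min, int want)
{
	/* One call replaces the old loop: the PCI core trims the request
	 * and returns the number of vectors granted (min <= n <= want),
	 * or a negative errno if even 'min' vectors are unavailable.
	 */
	int nvecs = pci_enable_msix_range(pdev, ent, min, want);

	if (nvecs < 0)
		return nvecs;	/* caller falls back to MSI or INTx */

	return nvecs;		/* caller resizes its queues to 'nvecs' */
}

Compared with looping on pci_enable_msix(), the fallback logic lives in one place; the bnx2x_enable_msix() rewrite above does exactly this when it shrinks num_ethernet_queues to match the granted vector count.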
int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state) @@ -4913,3 +4935,15 @@ void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id, disable = disable ? 1 : (usec ? 0 : 1); storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable); } + +void bnx2x_schedule_sp_rtnl(struct bnx2x *bp, enum sp_rtnl_flag flag, + u32 verbose) +{ + smp_mb__before_clear_bit(); + set_bit(flag, &bp->sp_rtnl_state); + smp_mb__after_clear_bit(); + DP((BNX2X_MSG_SP | verbose), "Scheduling sp_rtnl task [Flag: %d]\n", + flag); + schedule_delayed_work(&bp->sp_rtnl_task, 0); +} +EXPORT_SYMBOL(bnx2x_schedule_sp_rtnl); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h index a89a40f88c25..05f4f5f52635 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h @@ -47,31 +47,26 @@ extern int bnx2x_num_queues; } \ } while (0) -#define BNX2X_PCI_ALLOC(x, y, size) \ - do { \ - x = dma_zalloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \ - if (x == NULL) \ - goto alloc_mem_err; \ - DP(NETIF_MSG_HW, "BNX2X_PCI_ALLOC: Physical %Lx Virtual %p\n", \ - (unsigned long long)(*y), x); \ - } while (0) - -#define BNX2X_PCI_FALLOC(x, y, size) \ - do { \ - x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \ - if (x == NULL) \ - goto alloc_mem_err; \ - memset((void *)x, 0xFFFFFFFF, size); \ - DP(NETIF_MSG_HW, "BNX2X_PCI_FALLOC: Physical %Lx Virtual %p\n",\ - (unsigned long long)(*y), x); \ - } while (0) - -#define BNX2X_ALLOC(x, size) \ - do { \ - x = kzalloc(size, GFP_KERNEL); \ - if (x == NULL) \ - goto alloc_mem_err; \ - } while (0) +#define BNX2X_PCI_ALLOC(y, size) \ +({ \ + void *x = dma_zalloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \ + if (x) \ + DP(NETIF_MSG_HW, \ + "BNX2X_PCI_ALLOC: Physical %Lx Virtual %p\n", \ + (unsigned long long)(*y), x); \ + x; \ +}) +#define BNX2X_PCI_FALLOC(y, size) \ +({ \ + void *x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \ + if (x) { \ + memset(x, 0xff, size); \ + DP(NETIF_MSG_HW, \ + "BNX2X_PCI_FALLOC: Physical %Lx Virtual %p\n", \ + (unsigned long long)(*y), x); \ + } \ + x; \ +}) /*********************** Interfaces **************************** * Functions that need to be implemented by each driver version @@ -1324,4 +1319,7 @@ void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len); int bnx2x_drain_tx_queues(struct bnx2x *bp); void bnx2x_squeeze_objects(struct bnx2x *bp); +void bnx2x_schedule_sp_rtnl(struct bnx2x*, enum sp_rtnl_flag, + u32 verbose); + #endif /* BNX2X_CMN_H */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c index fdace204b054..97ea5421dd96 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c @@ -710,8 +710,7 @@ static inline void bnx2x_dcbx_update_tc_mapping(struct bnx2x *bp) * as we are handling an attention on a work queue which must be * flushed at some rtnl-locked contexts (e.g. 
if down) */ - if (!test_and_set_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state)) - schedule_delayed_work(&bp->sp_rtnl_task, 0); + bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_SETUP_TC, 0); } void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state) @@ -764,10 +763,7 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state) if (IS_MF(bp)) bnx2x_link_sync_notify(bp); - set_bit(BNX2X_SP_RTNL_TX_STOP, &bp->sp_rtnl_state); - - schedule_delayed_work(&bp->sp_rtnl_task, 0); - + bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_TX_STOP, 0); return; } case BNX2X_DCBX_STATE_TX_PAUSED: diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c index 38fc794c1655..b6de05e3149b 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c @@ -2969,8 +2969,9 @@ static void bnx2x_self_test(struct net_device *dev, #define IS_PORT_STAT(i) \ ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT) #define IS_FUNC_STAT(i) (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC) -#define IS_MF_MODE_STAT(bp) \ - (IS_MF(bp) && !(bp->msg_enable & BNX2X_MSG_STATS)) +#define HIDE_PORT_STAT(bp) \ + ((IS_MF(bp) && !(bp->msg_enable & BNX2X_MSG_STATS)) || \ + IS_VF(bp)) /* ethtool statistics are displayed for all regular ethernet queues and the * fcoe L2 queue if not disabled @@ -2992,7 +2993,7 @@ static int bnx2x_get_sset_count(struct net_device *dev, int stringset) BNX2X_NUM_Q_STATS; } else num_strings = 0; - if (IS_MF_MODE_STAT(bp)) { + if (HIDE_PORT_STAT(bp)) { for (i = 0; i < BNX2X_NUM_STATS; i++) if (IS_FUNC_STAT(i)) num_strings++; @@ -3047,7 +3048,7 @@ static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf) } for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) { - if (IS_MF_MODE_STAT(bp) && IS_PORT_STAT(i)) + if (HIDE_PORT_STAT(bp) && IS_PORT_STAT(i)) continue; strcpy(buf + (k + j)*ETH_GSTRING_LEN, bnx2x_stats_arr[i].string); @@ -3105,7 +3106,7 @@ static void bnx2x_get_ethtool_stats(struct net_device *dev, hw_stats = (u32 *)&bp->eth_stats; for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) { - if (IS_MF_MODE_STAT(bp) && IS_PORT_STAT(i)) + if (HIDE_PORT_STAT(bp) && IS_PORT_STAT(i)) continue; if (bnx2x_stats_arr[i].size == 0) { /* skip this counter */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h index 84aecdf06f7a..95dc36543548 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h @@ -87,7 +87,6 @@ (IRO[156].base + ((vfId) * IRO[156].m1)) #define CSTORM_VF_TO_PF_OFFSET(funcId) \ (IRO[150].base + ((funcId) * IRO[150].m1)) -#define TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET (IRO[204].base) #define TSTORM_APPROXIMATE_MATCH_MULTICAST_FILTERING_OFFSET(pfId) \ (IRO[203].base + ((pfId) * IRO[203].m1)) #define TSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[102].base) diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h index cf1df8b62e2c..5ba8af50c84f 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h @@ -2003,6 +2003,23 @@ struct shmem_lfa { #define SHMEM_LFA_DONT_CLEAR_STAT (1<<24) }; +/* Used to support NSCI get OS driver version + * on driver load the version value will be set + * on driver unload driver value of 0x0 will be set. 
+ */ +struct os_drv_ver { +#define DRV_VER_NOT_LOADED 0 + + /* personalities order is important */ +#define DRV_PERS_ETHERNET 0 +#define DRV_PERS_ISCSI 1 +#define DRV_PERS_FCOE 2 + + /* shmem2 struct is constant can't add more personalities here */ +#define MAX_DRV_PERS 3 + u32 versions[MAX_DRV_PERS]; +}; + struct ncsi_oem_fcoe_features { u32 fcoe_features1; #define FCOE_FEATURES1_IOS_PER_CONNECTION_MASK 0x0000FFFF @@ -2217,6 +2234,18 @@ struct shmem2_region { u32 reserved4; /* Offset 0x150 */ u32 link_attr_sync[PORT_MAX]; /* Offset 0x154 */ #define LINK_ATTR_SYNC_KR2_ENABLE (1<<0) + + u32 reserved5[2]; + u32 reserved6[PORT_MAX]; + + /* driver version for each personality */ + struct os_drv_ver func_os_drv_ver[E2_FUNC_MAX]; /* Offset 0x16c */ + + /* Flag to the driver that PF's drv_info_host_addr buffer was read */ + u32 mfw_drv_indication; + + /* We use indication for each PF (0..3) */ +#define MFW_DRV_IND_READ_DONE_OFFSET(_pf_) (1 << (_pf_)) }; @@ -2848,7 +2877,7 @@ struct afex_stats { #define BCM_5710_FW_MAJOR_VERSION 7 #define BCM_5710_FW_MINOR_VERSION 8 -#define BCM_5710_FW_REVISION_VERSION 17 +#define BCM_5710_FW_REVISION_VERSION 19 #define BCM_5710_FW_ENGINEERING_VERSION 0 #define BCM_5710_FW_COMPILE_FLAGS 1 diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 7d4382286457..a78edaccceee 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -120,7 +120,8 @@ static int debug; module_param(debug, int, S_IRUGO); MODULE_PARM_DESC(debug, " Default debug msglevel"); -struct workqueue_struct *bnx2x_wq; +static struct workqueue_struct *bnx2x_wq; +struct workqueue_struct *bnx2x_iov_wq; struct bnx2x_mac_vals { u32 xmac_addr; @@ -918,7 +919,7 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int) u16 start = 0, end = 0; u8 cos; #endif - if (disable_int) + if (IS_PF(bp) && disable_int) bnx2x_int_disable(bp); bp->stats_state = STATS_STATE_DISABLED; @@ -929,33 +930,41 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int) /* Indices */ /* Common */ - BNX2X_ERR("def_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x) spq_prod_idx(0x%x) next_stats_cnt(0x%x)\n", - bp->def_idx, bp->def_att_idx, bp->attn_state, - bp->spq_prod_idx, bp->stats_counter); - BNX2X_ERR("DSB: attn bits(0x%x) ack(0x%x) id(0x%x) idx(0x%x)\n", - bp->def_status_blk->atten_status_block.attn_bits, - bp->def_status_blk->atten_status_block.attn_bits_ack, - bp->def_status_blk->atten_status_block.status_block_id, - bp->def_status_blk->atten_status_block.attn_bits_index); - BNX2X_ERR(" def ("); - for (i = 0; i < HC_SP_SB_MAX_INDICES; i++) - pr_cont("0x%x%s", - bp->def_status_blk->sp_sb.index_values[i], - (i == HC_SP_SB_MAX_INDICES - 1) ?
") " : " "); - - for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++) - *((u32 *)&sp_sb_data + i) = REG_RD(bp, BAR_CSTRORM_INTMEM + - CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) + - i*sizeof(u32)); - - pr_cont("igu_sb_id(0x%x) igu_seg_id(0x%x) pf_id(0x%x) vnic_id(0x%x) vf_id(0x%x) vf_valid (0x%x) state(0x%x)\n", - sp_sb_data.igu_sb_id, - sp_sb_data.igu_seg_id, - sp_sb_data.p_func.pf_id, - sp_sb_data.p_func.vnic_id, - sp_sb_data.p_func.vf_id, - sp_sb_data.p_func.vf_valid, - sp_sb_data.state); + if (IS_PF(bp)) { + struct host_sp_status_block *def_sb = bp->def_status_blk; + int data_size, cstorm_offset; + + BNX2X_ERR("def_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x) spq_prod_idx(0x%x) next_stats_cnt(0x%x)\n", + bp->def_idx, bp->def_att_idx, bp->attn_state, + bp->spq_prod_idx, bp->stats_counter); + BNX2X_ERR("DSB: attn bits(0x%x) ack(0x%x) id(0x%x) idx(0x%x)\n", + def_sb->atten_status_block.attn_bits, + def_sb->atten_status_block.attn_bits_ack, + def_sb->atten_status_block.status_block_id, + def_sb->atten_status_block.attn_bits_index); + BNX2X_ERR(" def ("); + for (i = 0; i < HC_SP_SB_MAX_INDICES; i++) + pr_cont("0x%x%s", + def_sb->sp_sb.index_values[i], + (i == HC_SP_SB_MAX_INDICES - 1) ? ") " : " "); + + data_size = sizeof(struct hc_sp_status_block_data) / + sizeof(u32); + cstorm_offset = CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func); + for (i = 0; i < data_size; i++) + *((u32 *)&sp_sb_data + i) = + REG_RD(bp, BAR_CSTRORM_INTMEM + cstorm_offset + + i * sizeof(u32)); + + pr_cont("igu_sb_id(0x%x) igu_seg_id(0x%x) pf_id(0x%x) vnic_id(0x%x) vf_id(0x%x) vf_valid (0x%x) state(0x%x)\n", + sp_sb_data.igu_sb_id, + sp_sb_data.igu_seg_id, + sp_sb_data.p_func.pf_id, + sp_sb_data.p_func.vnic_id, + sp_sb_data.p_func.vf_id, + sp_sb_data.p_func.vf_valid, + sp_sb_data.state); + } for_each_eth_queue(bp, i) { struct bnx2x_fastpath *fp = &bp->fp[i]; @@ -1013,6 +1022,11 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int) pr_cont("0x%x%s", fp->sb_index_values[j], (j == loop - 1) ? ")" : " "); + + /* VF cannot access FW refelection for status block */ + if (IS_VF(bp)) + continue; + /* fw sb data */ data_size = CHIP_IS_E1x(bp) ? 
sizeof(struct hc_status_block_data_e1x) : @@ -1064,16 +1078,18 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int) } #ifdef BNX2X_STOP_ON_ERROR - - /* event queue */ - BNX2X_ERR("eq cons %x prod %x\n", bp->eq_cons, bp->eq_prod); - for (i = 0; i < NUM_EQ_DESC; i++) { - u32 *data = (u32 *)&bp->eq_ring[i].message.data; - - BNX2X_ERR("event queue [%d]: header: opcode %d, error %d\n", - i, bp->eq_ring[i].message.opcode, - bp->eq_ring[i].message.error); - BNX2X_ERR("data: %x %x %x\n", data[0], data[1], data[2]); + if (IS_PF(bp)) { + /* event queue */ + BNX2X_ERR("eq cons %x prod %x\n", bp->eq_cons, bp->eq_prod); + for (i = 0; i < NUM_EQ_DESC; i++) { + u32 *data = (u32 *)&bp->eq_ring[i].message.data; + + BNX2X_ERR("event queue [%d]: header: opcode %d, error %d\n", + i, bp->eq_ring[i].message.opcode, + bp->eq_ring[i].message.error); + BNX2X_ERR("data: %x %x %x\n", + data[0], data[1], data[2]); + } } /* Rings */ @@ -1140,8 +1156,10 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int) } } #endif - bnx2x_fw_dump(bp); - bnx2x_mc_assert(bp); + if (IS_PF(bp)) { + bnx2x_fw_dump(bp); + bnx2x_mc_assert(bp); + } BNX2X_ERR("end crash dump -----------------\n"); } @@ -1814,6 +1832,11 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe) drv_cmd = BNX2X_Q_CMD_EMPTY; break; + case (RAMROD_CMD_ID_ETH_TPA_UPDATE): + DP(BNX2X_MSG_SP, "got tpa update ramrod CID=%d\n", cid); + drv_cmd = BNX2X_Q_CMD_UPDATE_TPA; + break; + default: BNX2X_ERR("unexpected MC reply (%d) on fp[%d]\n", command, fp->index); @@ -1834,8 +1857,6 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe) #else return; #endif - /* SRIOV: reschedule any 'in_progress' operations */ - bnx2x_iov_sp_event(bp, cid, true); smp_mb__before_atomic_inc(); atomic_inc(&bp->cq_spq_left); @@ -3460,10 +3481,15 @@ static void bnx2x_handle_eee_event(struct bnx2x *bp) bnx2x_fw_command(bp, DRV_MSG_CODE_EEE_RESULTS_ACK, 0); } +#define BNX2X_UPDATE_DRV_INFO_IND_LENGTH (20) +#define BNX2X_UPDATE_DRV_INFO_IND_COUNT (25) + static void bnx2x_handle_drv_info_req(struct bnx2x *bp) { enum drv_info_opcode op_code; u32 drv_info_ctl = SHMEM2_RD(bp, drv_info_control); + bool release = false; + int wait; /* if drv_info version supported by MFW doesn't match - send NACK */ if ((drv_info_ctl & DRV_INFO_CONTROL_VER_MASK) != DRV_INFO_CUR_VER) { @@ -3474,6 +3500,9 @@ static void bnx2x_handle_drv_info_req(struct bnx2x *bp) op_code = (drv_info_ctl & DRV_INFO_CONTROL_OP_CODE_MASK) >> DRV_INFO_CONTROL_OP_CODE_SHIFT; + /* Must prevent other flows from accessing drv_info_to_mcp */ + mutex_lock(&bp->drv_info_mutex); + memset(&bp->slowpath->drv_info_to_mcp, 0, sizeof(union drv_info_to_mcp)); @@ -3490,7 +3519,7 @@ static void bnx2x_handle_drv_info_req(struct bnx2x *bp) default: /* if op code isn't supported - send NACK */ bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0); - return; + goto out; } /* if we got drv_info attn from MFW then these fields are defined in @@ -3502,6 +3531,106 @@ static void bnx2x_handle_drv_info_req(struct bnx2x *bp) U64_HI(bnx2x_sp_mapping(bp, drv_info_to_mcp))); bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_ACK, 0); + + /* Since possible management wants both this and get_driver_version + * need to wait until management notifies us it finished utilizing + * the buffer. 
+ */ + if (!SHMEM2_HAS(bp, mfw_drv_indication)) { + DP(BNX2X_MSG_MCP, "Management does not support indication\n"); + } else if (!bp->drv_info_mng_owner) { + u32 bit = MFW_DRV_IND_READ_DONE_OFFSET((BP_ABS_FUNC(bp) >> 1)); + + for (wait = 0; wait < BNX2X_UPDATE_DRV_INFO_IND_COUNT; wait++) { + u32 indication = SHMEM2_RD(bp, mfw_drv_indication); + + /* Management is done; need to clear indication */ + if (indication & bit) { + SHMEM2_WR(bp, mfw_drv_indication, + indication & ~bit); + release = true; + break; + } + + msleep(BNX2X_UPDATE_DRV_INFO_IND_LENGTH); + } + } + if (!release) { + DP(BNX2X_MSG_MCP, "Management did not release indication\n"); + bp->drv_info_mng_owner = true; + } + +out: + mutex_unlock(&bp->drv_info_mutex); +} + +static u32 bnx2x_update_mng_version_utility(u8 *version, bool bnx2x_format) +{ + u8 vals[4]; + int i = 0; + + if (bnx2x_format) { + i = sscanf(version, "1.%c%hhd.%hhd.%hhd", + &vals[0], &vals[1], &vals[2], &vals[3]); + if (i > 0) + vals[0] -= '0'; + } else { + i = sscanf(version, "%hhd.%hhd.%hhd.%hhd", + &vals[0], &vals[1], &vals[2], &vals[3]); + } + + while (i < 4) + vals[i++] = 0; + + return (vals[0] << 24) | (vals[1] << 16) | (vals[2] << 8) | vals[3]; +} + +void bnx2x_update_mng_version(struct bnx2x *bp) +{ + u32 iscsiver = DRV_VER_NOT_LOADED; + u32 fcoever = DRV_VER_NOT_LOADED; + u32 ethver = DRV_VER_NOT_LOADED; + int idx = BP_FW_MB_IDX(bp); + u8 *version; + + if (!SHMEM2_HAS(bp, func_os_drv_ver)) + return; + + mutex_lock(&bp->drv_info_mutex); + /* Must not proceed when `bnx2x_handle_drv_info_req' is feasible */ + if (bp->drv_info_mng_owner) + goto out; + + if (bp->state != BNX2X_STATE_OPEN) + goto out; + + /* Parse ethernet driver version */ + ethver = bnx2x_update_mng_version_utility(DRV_MODULE_VERSION, true); + if (!CNIC_LOADED(bp)) + goto out; + + /* Try getting storage driver version via cnic */ + memset(&bp->slowpath->drv_info_to_mcp, 0, + sizeof(union drv_info_to_mcp)); + bnx2x_drv_info_iscsi_stat(bp); + version = bp->slowpath->drv_info_to_mcp.iscsi_stat.version; + iscsiver = bnx2x_update_mng_version_utility(version, false); + + memset(&bp->slowpath->drv_info_to_mcp, 0, + sizeof(union drv_info_to_mcp)); + bnx2x_drv_info_fcoe_stat(bp); + version = bp->slowpath->drv_info_to_mcp.fcoe_stat.version; + fcoever = bnx2x_update_mng_version_utility(version, false); + +out: + SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_ETHERNET], ethver); + SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_ISCSI], iscsiver); + SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_FCOE], fcoever); + + mutex_unlock(&bp->drv_info_mutex); + + DP(BNX2X_MSG_MCP, "Setting driver version: ETH [%08x] iSCSI [%08x] FCoE [%08x]\n", + ethver, iscsiver, fcoever); } static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event) @@ -3644,10 +3773,18 @@ int bnx2x_sp_post(struct bnx2x *bp, int command, int cid, cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) | HW_CID(bp, cid)); - type = (cmd_type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE; - - type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) & - SPE_HDR_FUNCTION_ID); + /* In some cases, type may already contain the func-id + * mainly in SRIOV related use cases, so we add it here only + * if it's not already set. 
+ */ + if (!(cmd_type & SPE_HDR_FUNCTION_ID)) { + type = (cmd_type << SPE_HDR_CONN_TYPE_SHIFT) & + SPE_HDR_CONN_TYPE; + type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) & + SPE_HDR_FUNCTION_ID); + } else { + type = cmd_type; + } spe->hdr.type = cpu_to_le16(type); @@ -3878,10 +4015,7 @@ static void bnx2x_fan_failure(struct bnx2x *bp) * This is due to some boards consuming sufficient power when driver is * up to overheat if fan fails. */ - smp_mb__before_clear_bit(); - set_bit(BNX2X_SP_RTNL_FAN_FAILURE, &bp->sp_rtnl_state); - smp_mb__after_clear_bit(); - schedule_delayed_work(&bp->sp_rtnl_task, 0); + bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_FAN_FAILURE, 0); } static void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn) @@ -4025,7 +4159,8 @@ static void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn) bnx2x_handle_drv_info_req(bp); if (val & DRV_STATUS_VF_DISABLED) - bnx2x_vf_handle_flr_event(bp); + bnx2x_schedule_iov_task(bp, + BNX2X_IOV_HANDLE_FLR); if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF)) bnx2x_pmf_update(bp); @@ -5216,14 +5351,14 @@ static void bnx2x_eq_int(struct bnx2x *bp) /* handle eq element */ switch (opcode) { case EVENT_RING_OPCODE_VF_PF_CHANNEL: - DP(BNX2X_MSG_IOV, "vf pf channel element on eq\n"); - bnx2x_vf_mbx(bp, &elem->message.data.vf_pf_event); + bnx2x_vf_mbx_schedule(bp, + &elem->message.data.vf_pf_event); continue; case EVENT_RING_OPCODE_STAT_QUERY: - DP(BNX2X_MSG_SP | BNX2X_MSG_STATS, - "got statistics comp event %d\n", - bp->stats_comp++); + DP_AND((BNX2X_MSG_SP | BNX2X_MSG_STATS), + "got statistics comp event %d\n", + bp->stats_comp++); /* nothing to do with stats comp */ goto next_spqe; @@ -5273,6 +5408,8 @@ static void bnx2x_eq_int(struct bnx2x *bp) break; } else { + int cmd = BNX2X_SP_RTNL_AFEX_F_UPDATE; + DP(BNX2X_MSG_SP | BNX2X_MSG_MCP, "AFEX: ramrod completed FUNCTION_UPDATE\n"); f_obj->complete_cmd(bp, f_obj, @@ -5282,12 +5419,7 @@ static void bnx2x_eq_int(struct bnx2x *bp) * sp_rtnl task as all Queue SP operations * should run under rtnl_lock. */ - smp_mb__before_clear_bit(); - set_bit(BNX2X_SP_RTNL_AFEX_F_UPDATE, - &bp->sp_rtnl_state); - smp_mb__after_clear_bit(); - - schedule_delayed_work(&bp->sp_rtnl_task, 0); + bnx2x_schedule_sp_rtnl(bp, cmd, 0); } goto next_spqe; @@ -5435,13 +5567,6 @@ static void bnx2x_sp_task(struct work_struct *work) le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1); } - /* must be called after the EQ processing (since eq leads to sriov - * ramrod completion flows). - * This flow may have been scheduled by the arrival of a ramrod - * completion, or by the sriov code rescheduling itself. 
- */ - bnx2x_iov_sp_task(bp); - /* afex - poll to check if VIFSET_ACK should be sent to MFW */ if (test_and_clear_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK, &bp->sp_state)) { @@ -6005,18 +6130,6 @@ static void bnx2x_init_internal_common(struct bnx2x *bp) { int i; - if (IS_MF_SI(bp)) - /* - * In switch independent mode, the TSTORM needs to accept - * packets that failed classification, since approximate match - * mac addresses aren't written to NIG LLH - */ - REG_WR8(bp, BAR_TSTRORM_INTMEM + - TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 2); - else if (!CHIP_IS_E1(bp)) /* 57710 doesn't support MF */ - REG_WR8(bp, BAR_TSTRORM_INTMEM + - TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 0); - /* Zero this manually as its initialization is currently missing in the initTool */ for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++) @@ -7989,19 +8102,25 @@ void bnx2x_free_mem(struct bnx2x *bp) int bnx2x_alloc_mem_cnic(struct bnx2x *bp) { - if (!CHIP_IS_E1x(bp)) + if (!CHIP_IS_E1x(bp)) { /* size = the status block + ramrod buffers */ - BNX2X_PCI_ALLOC(bp->cnic_sb.e2_sb, &bp->cnic_sb_mapping, - sizeof(struct host_hc_status_block_e2)); - else - BNX2X_PCI_ALLOC(bp->cnic_sb.e1x_sb, - &bp->cnic_sb_mapping, - sizeof(struct - host_hc_status_block_e1x)); + bp->cnic_sb.e2_sb = BNX2X_PCI_ALLOC(&bp->cnic_sb_mapping, + sizeof(struct host_hc_status_block_e2)); + if (!bp->cnic_sb.e2_sb) + goto alloc_mem_err; + } else { + bp->cnic_sb.e1x_sb = BNX2X_PCI_ALLOC(&bp->cnic_sb_mapping, + sizeof(struct host_hc_status_block_e1x)); + if (!bp->cnic_sb.e1x_sb) + goto alloc_mem_err; + } - if (CONFIGURE_NIC_MODE(bp) && !bp->t2) + if (CONFIGURE_NIC_MODE(bp) && !bp->t2) { /* allocate searcher T2 table, as it wasn't allocated before */ - BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ); + bp->t2 = BNX2X_PCI_ALLOC(&bp->t2_mapping, SRC_T2_SZ); + if (!bp->t2) + goto alloc_mem_err; + } /* write address to which L5 should insert its values */ bp->cnic_eth_dev.addr_drv_info_to_mcp = @@ -8022,15 +8141,22 @@ int bnx2x_alloc_mem(struct bnx2x *bp) { int i, allocated, context_size; - if (!CONFIGURE_NIC_MODE(bp) && !bp->t2) + if (!CONFIGURE_NIC_MODE(bp) && !bp->t2) { /* allocate searcher T2 table */ - BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ); + bp->t2 = BNX2X_PCI_ALLOC(&bp->t2_mapping, SRC_T2_SZ); + if (!bp->t2) + goto alloc_mem_err; + } - BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping, - sizeof(struct host_sp_status_block)); + bp->def_status_blk = BNX2X_PCI_ALLOC(&bp->def_status_blk_mapping, + sizeof(struct host_sp_status_block)); + if (!bp->def_status_blk) + goto alloc_mem_err; - BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping, - sizeof(struct bnx2x_slowpath)); + bp->slowpath = BNX2X_PCI_ALLOC(&bp->slowpath_mapping, + sizeof(struct bnx2x_slowpath)); + if (!bp->slowpath) + goto alloc_mem_err; /* Allocate memory for CDU context: * This memory is allocated separately and not in the generic ILT @@ -8050,12 +8176,16 @@ int bnx2x_alloc_mem(struct bnx2x *bp) for (i = 0, allocated = 0; allocated < context_size; i++) { bp->context[i].size = min(CDU_ILT_PAGE_SZ, (context_size - allocated)); - BNX2X_PCI_ALLOC(bp->context[i].vcxt, - &bp->context[i].cxt_mapping, - bp->context[i].size); + bp->context[i].vcxt = BNX2X_PCI_ALLOC(&bp->context[i].cxt_mapping, + bp->context[i].size); + if (!bp->context[i].vcxt) + goto alloc_mem_err; allocated += bp->context[i].size; } - BNX2X_ALLOC(bp->ilt->lines, sizeof(struct ilt_line) * ILT_MAX_LINES); + bp->ilt->lines = kcalloc(ILT_MAX_LINES, sizeof(struct ilt_line), + GFP_KERNEL); + if (!bp->ilt->lines) + goto 
alloc_mem_err; if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC)) goto alloc_mem_err; @@ -8064,11 +8194,15 @@ int bnx2x_alloc_mem(struct bnx2x *bp) goto alloc_mem_err; /* Slow path ring */ - BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE); + bp->spq = BNX2X_PCI_ALLOC(&bp->spq_mapping, BCM_PAGE_SIZE); + if (!bp->spq) + goto alloc_mem_err; /* EQ */ - BNX2X_PCI_ALLOC(bp->eq_ring, &bp->eq_mapping, - BCM_PAGE_SIZE * NUM_EQ_PAGES); + bp->eq_ring = BNX2X_PCI_ALLOC(&bp->eq_mapping, + BCM_PAGE_SIZE * NUM_EQ_PAGES); + if (!bp->eq_ring) + goto alloc_mem_err; return 0; @@ -8849,6 +8983,7 @@ static int bnx2x_func_wait_started(struct bnx2x *bp) synchronize_irq(bp->pdev->irq); flush_workqueue(bnx2x_wq); + flush_workqueue(bnx2x_iov_wq); while (bnx2x_func_get_state(bp, &bp->func_obj) != BNX2X_F_STATE_STARTED && tout--) @@ -9774,6 +9909,10 @@ sp_rtnl_not_reset: bnx2x_dcbx_resume_hw_tx(bp); } + if (test_and_clear_bit(BNX2X_SP_RTNL_GET_DRV_VERSION, + &bp->sp_rtnl_state)) + bnx2x_update_mng_version(bp); + /* work which needs rtnl lock not-taken (as it takes the lock itself and * can be called from other contexts as well) */ @@ -11724,12 +11863,15 @@ static int bnx2x_init_bp(struct bnx2x *bp) mutex_init(&bp->port.phy_mutex); mutex_init(&bp->fw_mb_mutex); + mutex_init(&bp->drv_info_mutex); + bp->drv_info_mng_owner = false; spin_lock_init(&bp->stats_lock); sema_init(&bp->stats_sema, 1); INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task); INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task); INIT_DELAYED_WORK(&bp->period_task, bnx2x_period_task); + INIT_DELAYED_WORK(&bp->iov_task, bnx2x_iov_task); if (IS_PF(bp)) { rc = bnx2x_get_hwinfo(bp); if (rc) @@ -11771,6 +11913,8 @@ static int bnx2x_init_bp(struct bnx2x *bp) bp->disable_tpa = disable_tpa; bp->disable_tpa |= IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp); + /* Reduce memory usage in kdump environment by disabling TPA */ + bp->disable_tpa |= reset_devices; /* Set TPA flags */ if (bp->disable_tpa) { @@ -11942,7 +12086,7 @@ static int bnx2x_init_mcast_macs_list(struct bnx2x *bp, { int mc_count = netdev_mc_count(bp->dev); struct bnx2x_mcast_list_elem *mc_mac = - kzalloc(sizeof(*mc_mac) * mc_count, GFP_ATOMIC); + kcalloc(mc_count, sizeof(*mc_mac), GFP_ATOMIC); struct netdev_hw_addr *ha; if (!mc_mac) @@ -12064,11 +12208,8 @@ static void bnx2x_set_rx_mode(struct net_device *dev) return; } else { /* Schedule an SP task to handle rest of change */ - DP(NETIF_MSG_IFUP, "Scheduling an Rx mode change\n"); - smp_mb__before_clear_bit(); - set_bit(BNX2X_SP_RTNL_RX_MODE, &bp->sp_rtnl_state); - smp_mb__after_clear_bit(); - schedule_delayed_work(&bp->sp_rtnl_task, 0); + bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_RX_MODE, + NETIF_MSG_IFUP); } } @@ -12101,11 +12242,8 @@ void bnx2x_set_rx_mode_inner(struct bnx2x *bp) /* configuring mcast to a vf involves sleeping (when we * wait for the pf's response). 
*/ - smp_mb__before_clear_bit(); - set_bit(BNX2X_SP_RTNL_VFPF_MCAST, - &bp->sp_rtnl_state); - smp_mb__after_clear_bit(); - schedule_delayed_work(&bp->sp_rtnl_task, 0); + bnx2x_schedule_sp_rtnl(bp, + BNX2X_SP_RTNL_VFPF_MCAST, 0); } } @@ -13356,11 +13494,18 @@ static int __init bnx2x_init(void) pr_err("Cannot create workqueue\n"); return -ENOMEM; } + bnx2x_iov_wq = create_singlethread_workqueue("bnx2x_iov"); + if (!bnx2x_iov_wq) { + pr_err("Cannot create iov workqueue\n"); + destroy_workqueue(bnx2x_wq); + return -ENOMEM; + } ret = pci_register_driver(&bnx2x_pci_driver); if (ret) { pr_err("Cannot register driver\n"); destroy_workqueue(bnx2x_wq); + destroy_workqueue(bnx2x_iov_wq); } return ret; } @@ -13372,6 +13517,7 @@ static void __exit bnx2x_cleanup(void) pci_unregister_driver(&bnx2x_pci_driver); destroy_workqueue(bnx2x_wq); + destroy_workqueue(bnx2x_iov_wq); /* Free globally allocated resources */ list_for_each_safe(pos, q, &bnx2x_prev_list) { @@ -13765,6 +13911,7 @@ static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl) REG_WR(bp, scratch_offset + i, *(host_addr + i/4)); } + bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0); break; } @@ -13782,6 +13929,7 @@ static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl) cap &= ~DRV_FLAGS_CAPABILITIES_LOADED_FCOE; SHMEM2_WR(bp, drv_capabilities_flag[idx], cap); } + bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0); break; } @@ -13887,6 +14035,9 @@ static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops, rcu_assign_pointer(bp->cnic_ops, ops); + /* Schedule driver to read CNIC driver versions */ + bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0); + return 0; } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c index 0fb6ff2ac8e3..31297266b743 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c @@ -2277,11 +2277,11 @@ static int bnx2x_set_rx_mode_e2(struct bnx2x *bp, data->header.rule_cnt, p->rx_accept_flags, p->tx_accept_flags); - /* No need for an explicit memory barrier here as long we would - * need to ensure the ordering of writing to the SPQ element + /* No need for an explicit memory barrier here as long as we + * ensure the ordering of writing to the SPQ element * and updating of the SPQ producer which involves a memory - * read and we will have to put a full memory barrier there - * (inside bnx2x_sp_post()). + * read. If the memory read is removed we will have to put a + * full memory barrier there (inside bnx2x_sp_post()). */ /* Send a ramrod */ @@ -2982,11 +2982,11 @@ static int bnx2x_mcast_setup_e2(struct bnx2x *bp, raw->clear_pending(raw); return 0; } else { - /* No need for an explicit memory barrier here as long we would - * need to ensure the ordering of writing to the SPQ element + /* No need for an explicit memory barrier here as long as we + * ensure the ordering of writing to the SPQ element * and updating of the SPQ producer which involves a memory - * read and we will have to put a full memory barrier there - * (inside bnx2x_sp_post()). + * read. If the memory read is removed we will have to put a + * full memory barrier there (inside bnx2x_sp_post()). 
*/ /* Send a ramrod */ @@ -3466,11 +3466,11 @@ static int bnx2x_mcast_setup_e1(struct bnx2x *bp, raw->clear_pending(raw); return 0; } else { - /* No need for an explicit memory barrier here as long we would - * need to ensure the ordering of writing to the SPQ element + /* No need for an explicit memory barrier here as long as we + * ensure the ordering of writing to the SPQ element * and updating of the SPQ producer which involves a memory - * read and we will have to put a full memory barrier there - * (inside bnx2x_sp_post()). + * read. If the memory read is removed we will have to put a + * full memory barrier there (inside bnx2x_sp_post()). */ /* Send a ramrod */ @@ -4091,11 +4091,11 @@ static int bnx2x_setup_rss(struct bnx2x *bp, data->capabilities |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY; } - /* No need for an explicit memory barrier here as long we would - * need to ensure the ordering of writing to the SPQ element + /* No need for an explicit memory barrier here as long as we + * ensure the ordering of writing to the SPQ element * and updating of the SPQ producer which involves a memory - * read and we will have to put a full memory barrier there - * (inside bnx2x_sp_post()). + * read. If the memory read is removed we will have to put a + * full memory barrier there (inside bnx2x_sp_post()). */ /* Send a ramrod */ @@ -4158,16 +4158,6 @@ void bnx2x_init_rss_config_obj(struct bnx2x *bp, rss_obj->config_rss = bnx2x_setup_rss; } -int validate_vlan_mac(struct bnx2x *bp, - struct bnx2x_vlan_mac_obj *vlan_mac) -{ - if (!vlan_mac->get_n_elements) { - BNX2X_ERR("vlan mac object was not intialized\n"); - return -EINVAL; - } - return 0; -} - /********************** Queue state object ***********************************/ /** @@ -4587,13 +4577,12 @@ static inline int bnx2x_q_send_setup_e1x(struct bnx2x *bp, /* Fill the ramrod data */ bnx2x_q_fill_setup_data_cmn(bp, params, rdata); - /* No need for an explicit memory barrier here as long we would - * need to ensure the ordering of writing to the SPQ element + /* No need for an explicit memory barrier here as long as we + * ensure the ordering of writing to the SPQ element * and updating of the SPQ producer which involves a memory - * read and we will have to put a full memory barrier there - * (inside bnx2x_sp_post()). + * read. If the memory read is removed we will have to put a + * full memory barrier there (inside bnx2x_sp_post()). */ - return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX], U64_HI(data_mapping), U64_LO(data_mapping), ETH_CONNECTION_TYPE); @@ -4615,13 +4604,12 @@ static inline int bnx2x_q_send_setup_e2(struct bnx2x *bp, bnx2x_q_fill_setup_data_cmn(bp, params, rdata); bnx2x_q_fill_setup_data_e2(bp, params, rdata); - /* No need for an explicit memory barrier here as long we would - * need to ensure the ordering of writing to the SPQ element + /* No need for an explicit memory barrier here as long as we + * ensure the ordering of writing to the SPQ element * and updating of the SPQ producer which involves a memory - * read and we will have to put a full memory barrier there - * (inside bnx2x_sp_post()). + * read. If the memory read is removed we will have to put a + * full memory barrier there (inside bnx2x_sp_post()). 
*/ - return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX], U64_HI(data_mapping), U64_LO(data_mapping), ETH_CONNECTION_TYPE); @@ -4659,13 +4647,12 @@ static inline int bnx2x_q_send_setup_tx_only(struct bnx2x *bp, o->cids[cid_index], rdata->general.client_id, rdata->general.sp_client_id, rdata->general.cos); - /* No need for an explicit memory barrier here as long we would - * need to ensure the ordering of writing to the SPQ element + /* No need for an explicit memory barrier here as long as we + * ensure the ordering of writing to the SPQ element * and updating of the SPQ producer which involves a memory - * read and we will have to put a full memory barrier there - * (inside bnx2x_sp_post()). + * read. If the memory read is removed we will have to put a + * full memory barrier there (inside bnx2x_sp_post()). */ - return bnx2x_sp_post(bp, ramrod, o->cids[cid_index], U64_HI(data_mapping), U64_LO(data_mapping), ETH_CONNECTION_TYPE); @@ -4760,13 +4747,12 @@ static inline int bnx2x_q_send_update(struct bnx2x *bp, /* Fill the ramrod data */ bnx2x_q_fill_update_data(bp, o, update_params, rdata); - /* No need for an explicit memory barrier here as long we would - * need to ensure the ordering of writing to the SPQ element + /* No need for an explicit memory barrier here as long as we + * ensure the ordering of writing to the SPQ element * and updating of the SPQ producer which involves a memory - * read and we will have to put a full memory barrier there - * (inside bnx2x_sp_post()). + * read. If the memory read is removed we will have to put a + * full memory barrier there (inside bnx2x_sp_post()). */ - return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_UPDATE, o->cids[cid_index], U64_HI(data_mapping), U64_LO(data_mapping), ETH_CONNECTION_TYPE); @@ -4813,11 +4799,62 @@ static inline int bnx2x_q_send_activate(struct bnx2x *bp, return bnx2x_q_send_update(bp, params); } +static void bnx2x_q_fill_update_tpa_data(struct bnx2x *bp, + struct bnx2x_queue_sp_obj *obj, + struct bnx2x_queue_update_tpa_params *params, + struct tpa_update_ramrod_data *data) +{ + data->client_id = obj->cl_id; + data->complete_on_both_clients = params->complete_on_both_clients; + data->dont_verify_rings_pause_thr_flg = + params->dont_verify_thr; + data->max_agg_size = cpu_to_le16(params->max_agg_sz); + data->max_sges_for_packet = params->max_sges_pkt; + data->max_tpa_queues = params->max_tpa_queues; + data->sge_buff_size = cpu_to_le16(params->sge_buff_sz); + data->sge_page_base_hi = cpu_to_le32(U64_HI(params->sge_map)); + data->sge_page_base_lo = cpu_to_le32(U64_LO(params->sge_map)); + data->sge_pause_thr_high = cpu_to_le16(params->sge_pause_thr_high); + data->sge_pause_thr_low = cpu_to_le16(params->sge_pause_thr_low); + data->tpa_mode = params->tpa_mode; + data->update_ipv4 = params->update_ipv4; + data->update_ipv6 = params->update_ipv6; +} + static inline int bnx2x_q_send_update_tpa(struct bnx2x *bp, struct bnx2x_queue_state_params *params) { - /* TODO: Not implemented yet. 
*/ - return -1; + struct bnx2x_queue_sp_obj *o = params->q_obj; + struct tpa_update_ramrod_data *rdata = + (struct tpa_update_ramrod_data *)o->rdata; + dma_addr_t data_mapping = o->rdata_mapping; + struct bnx2x_queue_update_tpa_params *update_tpa_params = + ¶ms->params.update_tpa; + u16 type; + + /* Clear the ramrod data */ + memset(rdata, 0, sizeof(*rdata)); + + /* Fill the ramrod data */ + bnx2x_q_fill_update_tpa_data(bp, o, update_tpa_params, rdata); + + /* Add the function id inside the type, so that sp post function + * doesn't automatically add the PF func-id, this is required + * for operations done by PFs on behalf of their VFs + */ + type = ETH_CONNECTION_TYPE | + ((o->func_id) << SPE_HDR_FUNCTION_ID_SHIFT); + + /* No need for an explicit memory barrier here as long as we + * ensure the ordering of writing to the SPQ element + * and updating of the SPQ producer which involves a memory + * read. If the memory read is removed we will have to put a + * full memory barrier there (inside bnx2x_sp_post()). + */ + return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TPA_UPDATE, + o->cids[BNX2X_PRIMARY_CID_INDEX], + U64_HI(data_mapping), + U64_LO(data_mapping), type); } static inline int bnx2x_q_send_halt(struct bnx2x *bp, @@ -5647,6 +5684,12 @@ static inline int bnx2x_func_send_switch_update(struct bnx2x *bp, rdata->tx_switch_suspend = switch_update_params->suspend; rdata->echo = SWITCH_UPDATE; + /* No need for an explicit memory barrier here as long as we + * ensure the ordering of writing to the SPQ element + * and updating of the SPQ producer which involves a memory + * read. If the memory read is removed we will have to put a + * full memory barrier there (inside bnx2x_sp_post()). + */ return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0, U64_HI(data_mapping), U64_LO(data_mapping), NONE_CONNECTION_TYPE); @@ -5674,11 +5717,11 @@ static inline int bnx2x_func_send_afex_update(struct bnx2x *bp, rdata->allowed_priorities = afex_update_params->allowed_priorities; rdata->echo = AFEX_UPDATE; - /* No need for an explicit memory barrier here as long we would - * need to ensure the ordering of writing to the SPQ element - * and updating of the SPQ producer which involves a memory - * read and we will have to put a full memory barrier there - * (inside bnx2x_sp_post()). + /* No need for an explicit memory barrier here as long as we + * ensure the ordering of writing to the SPQ element + * and updating of the SPQ producer which involves a memory + * read. If the memory read is removed we will have to put a + * full memory barrier there (inside bnx2x_sp_post()). */ DP(BNX2X_MSG_SP, "afex: sending func_update vif_id 0x%x dvlan 0x%x prio 0x%x\n", @@ -5763,6 +5806,12 @@ static inline int bnx2x_func_send_tx_start(struct bnx2x *bp, rdata->traffic_type_to_priority_cos[i] = tx_start_params->traffic_type_to_priority_cos[i]; + /* No need for an explicit memory barrier here as long as we + * ensure the ordering of writing to the SPQ element + * and updating of the SPQ producer which involves a memory + * read. If the memory read is removed we will have to put a + * full memory barrier there (inside bnx2x_sp_post()). 
+ */ return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_START_TRAFFIC, 0, U64_HI(data_mapping), U64_LO(data_mapping), NONE_CONNECTION_TYPE); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h index 00d7f214a40a..80f6c790ed88 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h @@ -893,6 +893,24 @@ struct bnx2x_queue_update_params { u8 cid_index; }; +struct bnx2x_queue_update_tpa_params { + dma_addr_t sge_map; + u8 update_ipv4; + u8 update_ipv6; + u8 max_tpa_queues; + u8 max_sges_pkt; + u8 complete_on_both_clients; + u8 dont_verify_thr; + u8 tpa_mode; + u8 _pad; + + u16 sge_buff_sz; + u16 max_agg_sz; + + u16 sge_pause_thr_low; + u16 sge_pause_thr_high; +}; + struct rxq_pause_params { u16 bd_th_lo; u16 bd_th_hi; @@ -987,6 +1005,7 @@ struct bnx2x_queue_state_params { /* Params according to the current command */ union { struct bnx2x_queue_update_params update; + struct bnx2x_queue_update_tpa_params update_tpa; struct bnx2x_queue_setup_params setup; struct bnx2x_queue_init_params init; struct bnx2x_queue_setup_tx_only_params tx_only; @@ -1403,6 +1422,4 @@ int bnx2x_config_rss(struct bnx2x *bp, void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj, u8 *ind_table); -int validate_vlan_mac(struct bnx2x *bp, - struct bnx2x_vlan_mac_obj *vlan_mac); #endif /* BNX2X_SP_VERBS */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c index e42f48df6e94..5c523b32db70 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c @@ -102,82 +102,22 @@ static void bnx2x_vf_igu_ack_sb(struct bnx2x *bp, struct bnx2x_virtf *vf, mmiowb(); barrier(); } -/* VFOP - VF slow-path operation support */ -#define BNX2X_VFOP_FILTER_ADD_CNT_MAX 0x10000 +static bool bnx2x_validate_vf_sp_objs(struct bnx2x *bp, + struct bnx2x_virtf *vf, + bool print_err) +{ + if (!bnx2x_leading_vfq(vf, sp_initialized)) { + if (print_err) + BNX2X_ERR("Slowpath objects not yet initialized!\n"); + else + DP(BNX2X_MSG_IOV, "Slowpath objects not yet initialized!\n"); + return false; + } + return true; +} /* VFOP operations states */ -enum bnx2x_vfop_qctor_state { - BNX2X_VFOP_QCTOR_INIT, - BNX2X_VFOP_QCTOR_SETUP, - BNX2X_VFOP_QCTOR_INT_EN -}; - -enum bnx2x_vfop_qdtor_state { - BNX2X_VFOP_QDTOR_HALT, - BNX2X_VFOP_QDTOR_TERMINATE, - BNX2X_VFOP_QDTOR_CFCDEL, - BNX2X_VFOP_QDTOR_DONE -}; - -enum bnx2x_vfop_vlan_mac_state { - BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE, - BNX2X_VFOP_VLAN_MAC_CLEAR, - BNX2X_VFOP_VLAN_MAC_CHK_DONE, - BNX2X_VFOP_MAC_CONFIG_LIST, - BNX2X_VFOP_VLAN_CONFIG_LIST, - BNX2X_VFOP_VLAN_CONFIG_LIST_0 -}; - -enum bnx2x_vfop_qsetup_state { - BNX2X_VFOP_QSETUP_CTOR, - BNX2X_VFOP_QSETUP_VLAN0, - BNX2X_VFOP_QSETUP_DONE -}; - -enum bnx2x_vfop_mcast_state { - BNX2X_VFOP_MCAST_DEL, - BNX2X_VFOP_MCAST_ADD, - BNX2X_VFOP_MCAST_CHK_DONE -}; -enum bnx2x_vfop_qflr_state { - BNX2X_VFOP_QFLR_CLR_VLAN, - BNX2X_VFOP_QFLR_CLR_MAC, - BNX2X_VFOP_QFLR_TERMINATE, - BNX2X_VFOP_QFLR_DONE -}; - -enum bnx2x_vfop_flr_state { - BNX2X_VFOP_FLR_QUEUES, - BNX2X_VFOP_FLR_HW -}; - -enum bnx2x_vfop_close_state { - BNX2X_VFOP_CLOSE_QUEUES, - BNX2X_VFOP_CLOSE_HW -}; - -enum bnx2x_vfop_rxmode_state { - BNX2X_VFOP_RXMODE_CONFIG, - BNX2X_VFOP_RXMODE_DONE -}; - -enum bnx2x_vfop_qteardown_state { - BNX2X_VFOP_QTEARDOWN_RXMODE, - BNX2X_VFOP_QTEARDOWN_CLR_VLAN, - BNX2X_VFOP_QTEARDOWN_CLR_MAC, - BNX2X_VFOP_QTEARDOWN_CLR_MCAST, - 
BNX2X_VFOP_QTEARDOWN_QDTOR,
-	BNX2X_VFOP_QTEARDOWN_DONE
-};
-
-enum bnx2x_vfop_rss_state {
-	BNX2X_VFOP_RSS_CONFIG,
-	BNX2X_VFOP_RSS_DONE
-};
-
-#define bnx2x_vfop_reset_wq(vf) atomic_set(&vf->op_in_progress, 0)
-
 void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf,
 			      struct bnx2x_queue_init_params *init_params,
 			      struct bnx2x_queue_setup_params *setup_params,
@@ -221,7 +161,7 @@ void bnx2x_vfop_qctor_dump_rx(struct bnx2x *bp, struct bnx2x_virtf *vf,
 void bnx2x_vfop_qctor_prep(struct bnx2x *bp,
 			   struct bnx2x_virtf *vf,
 			   struct bnx2x_vf_queue *q,
-			   struct bnx2x_vfop_qctor_params *p,
+			   struct bnx2x_vf_queue_construct_params *p,
 			   unsigned long q_type)
 {
 	struct bnx2x_queue_init_params *init_p = &p->qstate.params.init;
@@ -290,191 +230,85 @@ void bnx2x_vfop_qctor_prep(struct bnx2x *bp,
 	}
 }
 
-/* VFOP queue construction */
-static void bnx2x_vfop_qctor(struct bnx2x *bp, struct bnx2x_virtf *vf)
+static int bnx2x_vf_queue_create(struct bnx2x *bp,
+				 struct bnx2x_virtf *vf, int qid,
+				 struct bnx2x_vf_queue_construct_params *qctor)
 {
-	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
-	struct bnx2x_vfop_args_qctor *args = &vfop->args.qctor;
-	struct bnx2x_queue_state_params *q_params = &vfop->op_p->qctor.qstate;
-	enum bnx2x_vfop_qctor_state state = vfop->state;
-
-	bnx2x_vfop_reset_wq(vf);
-
-	if (vfop->rc < 0)
-		goto op_err;
-
-	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
-
-	switch (state) {
-	case BNX2X_VFOP_QCTOR_INIT:
-
-		/* has this queue already been opened? */
-		if (bnx2x_get_q_logical_state(bp, q_params->q_obj) ==
-		    BNX2X_Q_LOGICAL_STATE_ACTIVE) {
-			DP(BNX2X_MSG_IOV,
-			   "Entered qctor but queue was already up. Aborting gracefully\n");
-			goto op_done;
-		}
-
-		/* next state */
-		vfop->state = BNX2X_VFOP_QCTOR_SETUP;
-
-		q_params->cmd = BNX2X_Q_CMD_INIT;
-		vfop->rc = bnx2x_queue_state_change(bp, q_params);
-
-		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
-
-	case BNX2X_VFOP_QCTOR_SETUP:
-		/* next state */
-		vfop->state = BNX2X_VFOP_QCTOR_INT_EN;
-
-		/* copy pre-prepared setup params to the queue-state params */
-		vfop->op_p->qctor.qstate.params.setup =
-			vfop->op_p->qctor.prep_qsetup;
-
-		q_params->cmd = BNX2X_Q_CMD_SETUP;
-		vfop->rc = bnx2x_queue_state_change(bp, q_params);
+	struct bnx2x_queue_state_params *q_params;
+	int rc = 0;
 
-		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
+	DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid);
 
-	case BNX2X_VFOP_QCTOR_INT_EN:
+	/* Prepare ramrod information */
+	q_params = &qctor->qstate;
+	q_params->q_obj = &bnx2x_vfq(vf, qid, sp_obj);
+	set_bit(RAMROD_COMP_WAIT, &q_params->ramrod_flags);
 
-		/* enable interrupts */
-		bnx2x_vf_igu_ack_sb(bp, vf, vf_igu_sb(vf, args->sb_idx),
-				    USTORM_ID, 0, IGU_INT_ENABLE, 0);
-		goto op_done;
-	default:
-		bnx2x_vfop_default(state);
+	if (bnx2x_get_q_logical_state(bp, q_params->q_obj) ==
+	    BNX2X_Q_LOGICAL_STATE_ACTIVE) {
+		DP(BNX2X_MSG_IOV, "queue was already up. Aborting gracefully\n");
+		goto out;
 	}
-op_err:
-	BNX2X_ERR("QCTOR[%d:%d] error: cmd %d, rc %d\n",
-		  vf->abs_vfid, args->qid, q_params->cmd, vfop->rc);
-op_done:
-	bnx2x_vfop_end(bp, vf, vfop);
-op_pending:
-	return;
-}
 
-static int bnx2x_vfop_qctor_cmd(struct bnx2x *bp,
-				struct bnx2x_virtf *vf,
-				struct bnx2x_vfop_cmd *cmd,
-				int qid)
-{
-	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
-
-	if (vfop) {
-		vf->op_params.qctor.qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj);
+	/* Run Queue 'construction' ramrods */
+	q_params->cmd = BNX2X_Q_CMD_INIT;
+	rc = bnx2x_queue_state_change(bp, q_params);
+	if (rc)
+		goto out;
 
-		vfop->args.qctor.qid = qid;
-		vfop->args.qctor.sb_idx = bnx2x_vfq(vf, qid, sb_idx);
+	memcpy(&q_params->params.setup, &qctor->prep_qsetup,
+	       sizeof(struct bnx2x_queue_setup_params));
+	q_params->cmd = BNX2X_Q_CMD_SETUP;
+	rc = bnx2x_queue_state_change(bp, q_params);
+	if (rc)
+		goto out;
 
-		bnx2x_vfop_opset(BNX2X_VFOP_QCTOR_INIT,
-				 bnx2x_vfop_qctor, cmd->done);
-		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qctor,
-					     cmd->block);
-	}
-	return -ENOMEM;
+	/* enable interrupts */
+	bnx2x_vf_igu_ack_sb(bp, vf, vf_igu_sb(vf, bnx2x_vfq(vf, qid, sb_idx)),
+			    USTORM_ID, 0, IGU_INT_ENABLE, 0);
+out:
+	return rc;
 }
 
-/* VFOP queue destruction */
-static void bnx2x_vfop_qdtor(struct bnx2x *bp, struct bnx2x_virtf *vf)
+static int bnx2x_vf_queue_destroy(struct bnx2x *bp, struct bnx2x_virtf *vf,
+				  int qid)
 {
-	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
-	struct bnx2x_vfop_args_qdtor *qdtor = &vfop->args.qdtor;
-	struct bnx2x_queue_state_params *q_params = &vfop->op_p->qctor.qstate;
-	enum bnx2x_vfop_qdtor_state state = vfop->state;
-
-	bnx2x_vfop_reset_wq(vf);
-
-	if (vfop->rc < 0)
-		goto op_err;
-
-	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
-
-	switch (state) {
-	case BNX2X_VFOP_QDTOR_HALT:
-
-		/* has this queue already been stopped? */
-		if (bnx2x_get_q_logical_state(bp, q_params->q_obj) ==
-		    BNX2X_Q_LOGICAL_STATE_STOPPED) {
-			DP(BNX2X_MSG_IOV,
-			   "Entered qdtor but queue was already stopped. Aborting gracefully\n");
-
-			/* next state */
-			vfop->state = BNX2X_VFOP_QDTOR_DONE;
-
-			bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
-		}
-
-		/* next state */
-		vfop->state = BNX2X_VFOP_QDTOR_TERMINATE;
-
-		q_params->cmd = BNX2X_Q_CMD_HALT;
-		vfop->rc = bnx2x_queue_state_change(bp, q_params);
-
-		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
-
-	case BNX2X_VFOP_QDTOR_TERMINATE:
-		/* next state */
-		vfop->state = BNX2X_VFOP_QDTOR_CFCDEL;
-
-		q_params->cmd = BNX2X_Q_CMD_TERMINATE;
-		vfop->rc = bnx2x_queue_state_change(bp, q_params);
+	enum bnx2x_queue_cmd cmds[] = {BNX2X_Q_CMD_HALT,
+				       BNX2X_Q_CMD_TERMINATE,
+				       BNX2X_Q_CMD_CFC_DEL};
+	struct bnx2x_queue_state_params q_params;
+	int rc, i;
 
-		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
+	DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
 
-	case BNX2X_VFOP_QDTOR_CFCDEL:
-		/* next state */
-		vfop->state = BNX2X_VFOP_QDTOR_DONE;
+	/* Prepare ramrod information */
+	memset(&q_params, 0, sizeof(struct bnx2x_queue_state_params));
+	q_params.q_obj = &bnx2x_vfq(vf, qid, sp_obj);
+	set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
 
-		q_params->cmd = BNX2X_Q_CMD_CFC_DEL;
-		vfop->rc = bnx2x_queue_state_change(bp, q_params);
+	if (bnx2x_get_q_logical_state(bp, q_params.q_obj) ==
+	    BNX2X_Q_LOGICAL_STATE_STOPPED) {
+		DP(BNX2X_MSG_IOV, "queue was already stopped. Aborting gracefully\n");
+		goto out;
+	}
 
-		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
-op_err:
-	BNX2X_ERR("QDTOR[%d:%d] error: cmd %d, rc %d\n",
-		  vf->abs_vfid, qdtor->qid, q_params->cmd, vfop->rc);
-op_done:
-	case BNX2X_VFOP_QDTOR_DONE:
-		/* invalidate the context */
-		if (qdtor->cxt) {
-			qdtor->cxt->ustorm_ag_context.cdu_usage = 0;
-			qdtor->cxt->xstorm_ag_context.cdu_reserved = 0;
+	/* Run Queue 'destruction' ramrods */
+	for (i = 0; i < ARRAY_SIZE(cmds); i++) {
+		q_params.cmd = cmds[i];
+		rc = bnx2x_queue_state_change(bp, &q_params);
+		if (rc) {
+			BNX2X_ERR("Failed to run Queue command %d\n", cmds[i]);
+			return rc;
 		}
-		bnx2x_vfop_end(bp, vf, vfop);
-		return;
-	default:
-		bnx2x_vfop_default(state);
 	}
-op_pending:
-	return;
-}
-
-static int bnx2x_vfop_qdtor_cmd(struct bnx2x *bp,
-				struct bnx2x_virtf *vf,
-				struct bnx2x_vfop_cmd *cmd,
-				int qid)
-{
-	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
-
-	if (vfop) {
-		struct bnx2x_queue_state_params *qstate =
-			&vf->op_params.qctor.qstate;
-
-		memset(qstate, 0, sizeof(*qstate));
-		qstate->q_obj = &bnx2x_vfq(vf, qid, sp_obj);
-
-		vfop->args.qdtor.qid = qid;
-		vfop->args.qdtor.cxt = bnx2x_vfq(vf, qid, cxt);
-
-		bnx2x_vfop_opset(BNX2X_VFOP_QDTOR_HALT,
-				 bnx2x_vfop_qdtor, cmd->done);
-		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qdtor,
-					     cmd->block);
-	} else {
-		BNX2X_ERR("VF[%d] failed to add a vfop\n", vf->abs_vfid);
-		return -ENOMEM;
+out:
+	/* Clean Context */
+	if (bnx2x_vfq(vf, qid, cxt)) {
+		bnx2x_vfq(vf, qid, cxt)->ustorm_ag_context.cdu_usage = 0;
+		bnx2x_vfq(vf, qid, cxt)->xstorm_ag_context.cdu_reserved = 0;
 	}
+
+	return 0;
 }
 
 static void
@@ -496,751 +330,293 @@ bnx2x_vf_set_igu_info(struct bnx2x *bp, u8 igu_sb_id, u8 abs_vfid)
 	BP_VFDB(bp)->vf_sbs_pool++;
 }
 
-/* VFOP MAC/VLAN helpers */
-static inline void bnx2x_vfop_credit(struct bnx2x *bp,
-				     struct bnx2x_vfop *vfop,
-				     struct bnx2x_vlan_mac_obj *obj)
+static inline void bnx2x_vf_vlan_credit(struct bnx2x *bp,
+					struct bnx2x_vlan_mac_obj *obj,
+					atomic_t *counter)
 {
-	struct bnx2x_vfop_args_filters *args = &vfop->args.filters;
-
-	/* update credit only if there is no error
-	 * and a valid credit counter
-	 */
-	if (!vfop->rc && args->credit) {
-		struct list_head *pos;
-		int read_lock;
-		int cnt = 0;
-
-		read_lock = bnx2x_vlan_mac_h_read_lock(bp, obj);
-		if (read_lock)
-			DP(BNX2X_MSG_SP, "Failed to take vlan mac read head; continuing anyway\n");
+	struct list_head *pos;
+	int read_lock;
+	int cnt = 0;
 
-		list_for_each(pos, &obj->head)
-			cnt++;
+	read_lock = bnx2x_vlan_mac_h_read_lock(bp, obj);
+	if (read_lock)
+		DP(BNX2X_MSG_SP, "Failed to take vlan mac read head; continuing anyway\n");
 
-		if (!read_lock)
-			bnx2x_vlan_mac_h_read_unlock(bp, obj);
+	list_for_each(pos, &obj->head)
+		cnt++;
 
-		atomic_set(args->credit, cnt);
-	}
-}
-
-static int bnx2x_vfop_set_user_req(struct bnx2x *bp,
-				   struct bnx2x_vfop_filter *pos,
-				   struct bnx2x_vlan_mac_data *user_req)
-{
-	user_req->cmd = pos->add ?
BNX2X_VLAN_MAC_ADD : - BNX2X_VLAN_MAC_DEL; + if (!read_lock) + bnx2x_vlan_mac_h_read_unlock(bp, obj); - switch (pos->type) { - case BNX2X_VFOP_FILTER_MAC: - memcpy(user_req->u.mac.mac, pos->mac, ETH_ALEN); - break; - case BNX2X_VFOP_FILTER_VLAN: - user_req->u.vlan.vlan = pos->vid; - break; - default: - BNX2X_ERR("Invalid filter type, skipping\n"); - return 1; - } - return 0; + atomic_set(counter, cnt); } -static int bnx2x_vfop_config_list(struct bnx2x *bp, - struct bnx2x_vfop_filters *filters, - struct bnx2x_vlan_mac_ramrod_params *vlan_mac) +static int bnx2x_vf_vlan_mac_clear(struct bnx2x *bp, struct bnx2x_virtf *vf, + int qid, bool drv_only, bool mac) { - struct bnx2x_vfop_filter *pos, *tmp; - struct list_head rollback_list, *filters_list = &filters->head; - struct bnx2x_vlan_mac_data *user_req = &vlan_mac->user_req; - int rc = 0, cnt = 0; - - INIT_LIST_HEAD(&rollback_list); - - list_for_each_entry_safe(pos, tmp, filters_list, link) { - if (bnx2x_vfop_set_user_req(bp, pos, user_req)) - continue; + struct bnx2x_vlan_mac_ramrod_params ramrod; + int rc; - rc = bnx2x_config_vlan_mac(bp, vlan_mac); - if (rc >= 0) { - cnt += pos->add ? 1 : -1; - list_move(&pos->link, &rollback_list); - rc = 0; - } else if (rc == -EEXIST) { - rc = 0; - } else { - BNX2X_ERR("Failed to add a new vlan_mac command\n"); - break; - } - } + DP(BNX2X_MSG_IOV, "vf[%d] - deleting all %s\n", vf->abs_vfid, + mac ? "MACs" : "VLANs"); - /* rollback if error or too many rules added */ - if (rc || cnt > filters->add_cnt) { - BNX2X_ERR("error or too many rules added. Performing rollback\n"); - list_for_each_entry_safe(pos, tmp, &rollback_list, link) { - pos->add = !pos->add; /* reverse op */ - bnx2x_vfop_set_user_req(bp, pos, user_req); - bnx2x_config_vlan_mac(bp, vlan_mac); - list_del(&pos->link); - } - cnt = 0; - if (!rc) - rc = -EINVAL; + /* Prepare ramrod params */ + memset(&ramrod, 0, sizeof(struct bnx2x_vlan_mac_ramrod_params)); + if (mac) { + set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags); + ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj); + } else { + set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, + &ramrod.user_req.vlan_mac_flags); + ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj); } - filters->add_cnt = cnt; - return rc; -} - -/* VFOP set VLAN/MAC */ -static void bnx2x_vfop_vlan_mac(struct bnx2x *bp, struct bnx2x_virtf *vf) -{ - struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf); - struct bnx2x_vlan_mac_ramrod_params *vlan_mac = &vfop->op_p->vlan_mac; - struct bnx2x_vlan_mac_obj *obj = vlan_mac->vlan_mac_obj; - struct bnx2x_vfop_filters *filters = vfop->args.filters.multi_filter; - - enum bnx2x_vfop_vlan_mac_state state = vfop->state; - - if (vfop->rc < 0) - goto op_err; - - DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state); - - bnx2x_vfop_reset_wq(vf); - - switch (state) { - case BNX2X_VFOP_VLAN_MAC_CLEAR: - /* next state */ - vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE; - - /* do delete */ - vfop->rc = obj->delete_all(bp, obj, - &vlan_mac->user_req.vlan_mac_flags, - &vlan_mac->ramrod_flags); - - bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); - - case BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE: - /* next state */ - vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE; - - /* do config */ - vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac); - if (vfop->rc == -EEXIST) - vfop->rc = 0; + ramrod.user_req.cmd = BNX2X_VLAN_MAC_DEL; - bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); - - case BNX2X_VFOP_VLAN_MAC_CHK_DONE: - vfop->rc = !!obj->raw.check_pending(&obj->raw); - bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE); - - case 
BNX2X_VFOP_MAC_CONFIG_LIST: - /* next state */ - vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE; - - /* do list config */ - vfop->rc = bnx2x_vfop_config_list(bp, filters, vlan_mac); - if (vfop->rc) - goto op_err; - - set_bit(RAMROD_CONT, &vlan_mac->ramrod_flags); - vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac); - bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); - - case BNX2X_VFOP_VLAN_CONFIG_LIST: - /* next state */ - vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE; - - /* do list config */ - vfop->rc = bnx2x_vfop_config_list(bp, filters, vlan_mac); - if (!vfop->rc) { - set_bit(RAMROD_CONT, &vlan_mac->ramrod_flags); - vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac); - } - bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); + set_bit(RAMROD_EXEC, &ramrod.ramrod_flags); + if (drv_only) + set_bit(RAMROD_DRV_CLR_ONLY, &ramrod.ramrod_flags); + else + set_bit(RAMROD_COMP_WAIT, &ramrod.ramrod_flags); - default: - bnx2x_vfop_default(state); + /* Start deleting */ + rc = ramrod.vlan_mac_obj->delete_all(bp, + ramrod.vlan_mac_obj, + &ramrod.user_req.vlan_mac_flags, + &ramrod.ramrod_flags); + if (rc) { + BNX2X_ERR("Failed to delete all %s\n", + mac ? "MACs" : "VLANs"); + return rc; } -op_err: - BNX2X_ERR("VLAN-MAC error: rc %d\n", vfop->rc); -op_done: - kfree(filters); - bnx2x_vfop_credit(bp, vfop, obj); - bnx2x_vfop_end(bp, vf, vfop); -op_pending: - return; -} - -struct bnx2x_vfop_vlan_mac_flags { - bool drv_only; - bool dont_consume; - bool single_cmd; - bool add; -}; - -static void -bnx2x_vfop_vlan_mac_prep_ramrod(struct bnx2x_vlan_mac_ramrod_params *ramrod, - struct bnx2x_vfop_vlan_mac_flags *flags) -{ - struct bnx2x_vlan_mac_data *ureq = &ramrod->user_req; - - memset(ramrod, 0, sizeof(*ramrod)); - /* ramrod flags */ - if (flags->drv_only) - set_bit(RAMROD_DRV_CLR_ONLY, &ramrod->ramrod_flags); - if (flags->single_cmd) - set_bit(RAMROD_EXEC, &ramrod->ramrod_flags); + /* Clear the vlan counters */ + if (!mac) + atomic_set(&bnx2x_vfq(vf, qid, vlan_count), 0); - /* mac_vlan flags */ - if (flags->dont_consume) - set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, &ureq->vlan_mac_flags); - - /* cmd */ - ureq->cmd = flags->add ? 
BNX2X_VLAN_MAC_ADD : BNX2X_VLAN_MAC_DEL; -} - -static inline void -bnx2x_vfop_mac_prep_ramrod(struct bnx2x_vlan_mac_ramrod_params *ramrod, - struct bnx2x_vfop_vlan_mac_flags *flags) -{ - bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, flags); - set_bit(BNX2X_ETH_MAC, &ramrod->user_req.vlan_mac_flags); + return 0; } -static int bnx2x_vfop_mac_delall_cmd(struct bnx2x *bp, - struct bnx2x_virtf *vf, - struct bnx2x_vfop_cmd *cmd, - int qid, bool drv_only) +static int bnx2x_vf_mac_vlan_config(struct bnx2x *bp, + struct bnx2x_virtf *vf, int qid, + struct bnx2x_vf_mac_vlan_filter *filter, + bool drv_only) { - struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); + struct bnx2x_vlan_mac_ramrod_params ramrod; int rc; - if (vfop) { - struct bnx2x_vfop_args_filters filters = { - .multi_filter = NULL, /* single */ - .credit = NULL, /* consume credit */ - }; - struct bnx2x_vfop_vlan_mac_flags flags = { - .drv_only = drv_only, - .dont_consume = (filters.credit != NULL), - .single_cmd = true, - .add = false /* don't care */, - }; - struct bnx2x_vlan_mac_ramrod_params *ramrod = - &vf->op_params.vlan_mac; - - /* set ramrod params */ - bnx2x_vfop_mac_prep_ramrod(ramrod, &flags); - - /* set object */ - rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, mac_obj)); - if (rc) - return rc; - ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj); - - /* set extra args */ - vfop->args.filters = filters; - - bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CLEAR, - bnx2x_vfop_vlan_mac, cmd->done); - return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac, - cmd->block); + DP(BNX2X_MSG_IOV, "vf[%d] - %s a %s filter\n", + vf->abs_vfid, filter->add ? "Adding" : "Deleting", + filter->type == BNX2X_VF_FILTER_MAC ? "MAC" : "VLAN"); + + /* Prepare ramrod params */ + memset(&ramrod, 0, sizeof(struct bnx2x_vlan_mac_ramrod_params)); + if (filter->type == BNX2X_VF_FILTER_VLAN) { + set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, + &ramrod.user_req.vlan_mac_flags); + ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj); + ramrod.user_req.u.vlan.vlan = filter->vid; + } else { + set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags); + ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj); + memcpy(&ramrod.user_req.u.mac.mac, filter->mac, ETH_ALEN); + } + ramrod.user_req.cmd = filter->add ? 
BNX2X_VLAN_MAC_ADD : + BNX2X_VLAN_MAC_DEL; + + /* Verify there are available vlan credits */ + if (filter->add && filter->type == BNX2X_VF_FILTER_VLAN && + (atomic_read(&bnx2x_vfq(vf, qid, vlan_count)) >= + vf_vlan_rules_cnt(vf))) { + BNX2X_ERR("No credits for vlan\n"); + return -ENOMEM; } - return -ENOMEM; -} - -int bnx2x_vfop_mac_list_cmd(struct bnx2x *bp, - struct bnx2x_virtf *vf, - struct bnx2x_vfop_cmd *cmd, - struct bnx2x_vfop_filters *macs, - int qid, bool drv_only) -{ - struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); - int rc; - if (vfop) { - struct bnx2x_vfop_args_filters filters = { - .multi_filter = macs, - .credit = NULL, /* consume credit */ - }; - struct bnx2x_vfop_vlan_mac_flags flags = { - .drv_only = drv_only, - .dont_consume = (filters.credit != NULL), - .single_cmd = false, - .add = false, /* don't care since only the items in the - * filters list affect the sp operation, - * not the list itself - */ - }; - struct bnx2x_vlan_mac_ramrod_params *ramrod = - &vf->op_params.vlan_mac; - - /* set ramrod params */ - bnx2x_vfop_mac_prep_ramrod(ramrod, &flags); - - /* set object */ - rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, mac_obj)); - if (rc) - return rc; - ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj); + set_bit(RAMROD_EXEC, &ramrod.ramrod_flags); + if (drv_only) + set_bit(RAMROD_DRV_CLR_ONLY, &ramrod.ramrod_flags); + else + set_bit(RAMROD_COMP_WAIT, &ramrod.ramrod_flags); + + /* Add/Remove the filter */ + rc = bnx2x_config_vlan_mac(bp, &ramrod); + if (rc && rc != -EEXIST) { + BNX2X_ERR("Failed to %s %s\n", + filter->add ? "add" : "delete", + filter->type == BNX2X_VF_FILTER_MAC ? "MAC" : + "VLAN"); + return rc; + } - /* set extra args */ - filters.multi_filter->add_cnt = BNX2X_VFOP_FILTER_ADD_CNT_MAX; - vfop->args.filters = filters; + /* Update the vlan counters */ + if (filter->type == BNX2X_VF_FILTER_VLAN) + bnx2x_vf_vlan_credit(bp, ramrod.vlan_mac_obj, + &bnx2x_vfq(vf, qid, vlan_count)); - bnx2x_vfop_opset(BNX2X_VFOP_MAC_CONFIG_LIST, - bnx2x_vfop_vlan_mac, cmd->done); - return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac, - cmd->block); - } - return -ENOMEM; + return 0; } -static int bnx2x_vfop_vlan_set_cmd(struct bnx2x *bp, - struct bnx2x_virtf *vf, - struct bnx2x_vfop_cmd *cmd, - int qid, u16 vid, bool add) +int bnx2x_vf_mac_vlan_config_list(struct bnx2x *bp, struct bnx2x_virtf *vf, + struct bnx2x_vf_mac_vlan_filters *filters, + int qid, bool drv_only) { - struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); - int rc; + int rc = 0, i; - if (vfop) { - struct bnx2x_vfop_args_filters filters = { - .multi_filter = NULL, /* single command */ - .credit = &bnx2x_vfq(vf, qid, vlan_count), - }; - struct bnx2x_vfop_vlan_mac_flags flags = { - .drv_only = false, - .dont_consume = (filters.credit != NULL), - .single_cmd = true, - .add = add, - }; - struct bnx2x_vlan_mac_ramrod_params *ramrod = - &vf->op_params.vlan_mac; - - /* set ramrod params */ - bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags); - ramrod->user_req.u.vlan.vlan = vid; - - /* set object */ - rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj)); - if (rc) - return rc; - ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj); + DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid); - /* set extra args */ - vfop->args.filters = filters; + if (!bnx2x_validate_vf_sp_objs(bp, vf, true)) + return -EINVAL; - bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE, - bnx2x_vfop_vlan_mac, cmd->done); - return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac, - cmd->block); + /* Prepare ramrod params */ + for (i = 0; i < 
filters->count; i++) { + rc = bnx2x_vf_mac_vlan_config(bp, vf, qid, + &filters->filters[i], drv_only); + if (rc) + break; } - return -ENOMEM; -} - -static int bnx2x_vfop_vlan_delall_cmd(struct bnx2x *bp, - struct bnx2x_virtf *vf, - struct bnx2x_vfop_cmd *cmd, - int qid, bool drv_only) -{ - struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); - int rc; - if (vfop) { - struct bnx2x_vfop_args_filters filters = { - .multi_filter = NULL, /* single command */ - .credit = &bnx2x_vfq(vf, qid, vlan_count), - }; - struct bnx2x_vfop_vlan_mac_flags flags = { - .drv_only = drv_only, - .dont_consume = (filters.credit != NULL), - .single_cmd = true, - .add = false, /* don't care */ - }; - struct bnx2x_vlan_mac_ramrod_params *ramrod = - &vf->op_params.vlan_mac; - - /* set ramrod params */ - bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags); - - /* set object */ - rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj)); - if (rc) - return rc; - ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj); + /* Rollback if needed */ + if (i != filters->count) { + BNX2X_ERR("Managed only %d/%d filters - rolling back\n", + i, filters->count + 1); + while (--i >= 0) { + filters->filters[i].add = !filters->filters[i].add; + bnx2x_vf_mac_vlan_config(bp, vf, qid, + &filters->filters[i], + drv_only); + } + } - /* set extra args */ - vfop->args.filters = filters; + /* It's our responsibility to free the filters */ + kfree(filters); - bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CLEAR, - bnx2x_vfop_vlan_mac, cmd->done); - return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac, - cmd->block); - } - return -ENOMEM; + return rc; } -int bnx2x_vfop_vlan_list_cmd(struct bnx2x *bp, - struct bnx2x_virtf *vf, - struct bnx2x_vfop_cmd *cmd, - struct bnx2x_vfop_filters *vlans, - int qid, bool drv_only) +int bnx2x_vf_queue_setup(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid, + struct bnx2x_vf_queue_construct_params *qctor) { - struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); int rc; - if (vfop) { - struct bnx2x_vfop_args_filters filters = { - .multi_filter = vlans, - .credit = &bnx2x_vfq(vf, qid, vlan_count), - }; - struct bnx2x_vfop_vlan_mac_flags flags = { - .drv_only = drv_only, - .dont_consume = (filters.credit != NULL), - .single_cmd = false, - .add = false, /* don't care */ - }; - struct bnx2x_vlan_mac_ramrod_params *ramrod = - &vf->op_params.vlan_mac; - - /* set ramrod params */ - bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags); - - /* set object */ - rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj)); - if (rc) - return rc; - ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj); - - /* set extra args */ - filters.multi_filter->add_cnt = vf_vlan_rules_cnt(vf) - - atomic_read(filters.credit); - - vfop->args.filters = filters; + DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid); - bnx2x_vfop_opset(BNX2X_VFOP_VLAN_CONFIG_LIST, - bnx2x_vfop_vlan_mac, cmd->done); - return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac, - cmd->block); - } - return -ENOMEM; -} - -/* VFOP queue setup (queue constructor + set vlan 0) */ -static void bnx2x_vfop_qsetup(struct bnx2x *bp, struct bnx2x_virtf *vf) -{ - struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf); - int qid = vfop->args.qctor.qid; - enum bnx2x_vfop_qsetup_state state = vfop->state; - struct bnx2x_vfop_cmd cmd = { - .done = bnx2x_vfop_qsetup, - .block = false, - }; - - if (vfop->rc < 0) + rc = bnx2x_vf_queue_create(bp, vf, qid, qctor); + if (rc) goto op_err; - DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state); + /* Configure vlan0 for leading queue */ + if (!qid) { + 
struct bnx2x_vf_mac_vlan_filter filter; - switch (state) { - case BNX2X_VFOP_QSETUP_CTOR: - /* init the queue ctor command */ - vfop->state = BNX2X_VFOP_QSETUP_VLAN0; - vfop->rc = bnx2x_vfop_qctor_cmd(bp, vf, &cmd, qid); - if (vfop->rc) + memset(&filter, 0, sizeof(struct bnx2x_vf_mac_vlan_filter)); + filter.type = BNX2X_VF_FILTER_VLAN; + filter.add = true; + filter.vid = 0; + rc = bnx2x_vf_mac_vlan_config(bp, vf, qid, &filter, false); + if (rc) goto op_err; - return; - - case BNX2X_VFOP_QSETUP_VLAN0: - /* skip if non-leading or FPGA/EMU*/ - if (qid) - goto op_done; + } - /* init the queue set-vlan command (for vlan 0) */ - vfop->state = BNX2X_VFOP_QSETUP_DONE; - vfop->rc = bnx2x_vfop_vlan_set_cmd(bp, vf, &cmd, qid, 0, true); - if (vfop->rc) - goto op_err; - return; + /* Schedule the configuration of any pending vlan filters */ + vf->cfg_flags |= VF_CFG_VLAN; + bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_HYPERVISOR_VLAN, + BNX2X_MSG_IOV); + return 0; op_err: - BNX2X_ERR("QSETUP[%d:%d] error: rc %d\n", vf->abs_vfid, qid, vfop->rc); -op_done: - case BNX2X_VFOP_QSETUP_DONE: - vf->cfg_flags |= VF_CFG_VLAN; - smp_mb__before_clear_bit(); - set_bit(BNX2X_SP_RTNL_HYPERVISOR_VLAN, - &bp->sp_rtnl_state); - smp_mb__after_clear_bit(); - schedule_delayed_work(&bp->sp_rtnl_task, 0); - bnx2x_vfop_end(bp, vf, vfop); - return; - default: - bnx2x_vfop_default(state); - } + BNX2X_ERR("QSETUP[%d:%d] error: rc %d\n", vf->abs_vfid, qid, rc); + return rc; } -int bnx2x_vfop_qsetup_cmd(struct bnx2x *bp, - struct bnx2x_virtf *vf, - struct bnx2x_vfop_cmd *cmd, - int qid) +static int bnx2x_vf_queue_flr(struct bnx2x *bp, struct bnx2x_virtf *vf, + int qid) { - struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); + int rc; - if (vfop) { - vfop->args.qctor.qid = qid; + DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid); - bnx2x_vfop_opset(BNX2X_VFOP_QSETUP_CTOR, - bnx2x_vfop_qsetup, cmd->done); - return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qsetup, - cmd->block); + /* If needed, clean the filtering data base */ + if ((qid == LEADING_IDX) && + bnx2x_validate_vf_sp_objs(bp, vf, false)) { + rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true, false); + if (rc) + goto op_err; + rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true, true); + if (rc) + goto op_err; } - return -ENOMEM; -} - -/* VFOP queue FLR handling (clear vlans, clear macs, queue destructor) */ -static void bnx2x_vfop_qflr(struct bnx2x *bp, struct bnx2x_virtf *vf) -{ - struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf); - int qid = vfop->args.qx.qid; - enum bnx2x_vfop_qflr_state state = vfop->state; - struct bnx2x_queue_state_params *qstate; - struct bnx2x_vfop_cmd cmd; - - bnx2x_vfop_reset_wq(vf); - - if (vfop->rc < 0) - goto op_err; - DP(BNX2X_MSG_IOV, "VF[%d] STATE: %d\n", vf->abs_vfid, state); + /* Terminate queue */ + if (bnx2x_vfq(vf, qid, sp_obj).state != BNX2X_Q_STATE_RESET) { + struct bnx2x_queue_state_params qstate; - cmd.done = bnx2x_vfop_qflr; - cmd.block = false; - - switch (state) { - case BNX2X_VFOP_QFLR_CLR_VLAN: - /* vlan-clear-all: driver-only, don't consume credit */ - vfop->state = BNX2X_VFOP_QFLR_CLR_MAC; - - if (!validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj))) { - /* the vlan_mac vfop will re-schedule us */ - vfop->rc = bnx2x_vfop_vlan_delall_cmd(bp, vf, &cmd, - qid, true); - if (vfop->rc) - goto op_err; - return; - - } else { - /* need to reschedule ourselves */ - bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); - } - - case BNX2X_VFOP_QFLR_CLR_MAC: - /* mac-clear-all: driver only consume credit */ - vfop->state = BNX2X_VFOP_QFLR_TERMINATE; - if 
(!validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, mac_obj))) { - /* the vlan_mac vfop will re-schedule us */ - vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd, - qid, true); - if (vfop->rc) - goto op_err; - return; - - } else { - /* need to reschedule ourselves */ - bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); - } - - case BNX2X_VFOP_QFLR_TERMINATE: - qstate = &vfop->op_p->qctor.qstate; - memset(qstate , 0, sizeof(*qstate)); - qstate->q_obj = &bnx2x_vfq(vf, qid, sp_obj); - vfop->state = BNX2X_VFOP_QFLR_DONE; - - DP(BNX2X_MSG_IOV, "VF[%d] qstate during flr was %d\n", - vf->abs_vfid, qstate->q_obj->state); - - if (qstate->q_obj->state != BNX2X_Q_STATE_RESET) { - qstate->q_obj->state = BNX2X_Q_STATE_STOPPED; - qstate->cmd = BNX2X_Q_CMD_TERMINATE; - vfop->rc = bnx2x_queue_state_change(bp, qstate); - bnx2x_vfop_finalize(vf, vfop->rc, VFOP_VERIFY_PEND); - } else { - goto op_done; - } - -op_err: - BNX2X_ERR("QFLR[%d:%d] error: rc %d\n", - vf->abs_vfid, qid, vfop->rc); -op_done: - case BNX2X_VFOP_QFLR_DONE: - bnx2x_vfop_end(bp, vf, vfop); - return; - default: - bnx2x_vfop_default(state); + memset(&qstate, 0, sizeof(struct bnx2x_queue_state_params)); + qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj); + qstate.q_obj->state = BNX2X_Q_STATE_STOPPED; + qstate.cmd = BNX2X_Q_CMD_TERMINATE; + set_bit(RAMROD_COMP_WAIT, &qstate.ramrod_flags); + rc = bnx2x_queue_state_change(bp, &qstate); + if (rc) + goto op_err; } -op_pending: - return; -} - -static int bnx2x_vfop_qflr_cmd(struct bnx2x *bp, - struct bnx2x_virtf *vf, - struct bnx2x_vfop_cmd *cmd, - int qid) -{ - struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); - if (vfop) { - vfop->args.qx.qid = qid; - bnx2x_vfop_opset(BNX2X_VFOP_QFLR_CLR_VLAN, - bnx2x_vfop_qflr, cmd->done); - return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qflr, - cmd->block); - } - return -ENOMEM; + return 0; +op_err: + BNX2X_ERR("vf[%d:%d] error: rc %d\n", vf->abs_vfid, qid, rc); + return rc; } -/* VFOP multi-casts */ -static void bnx2x_vfop_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf) +int bnx2x_vf_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf, + bnx2x_mac_addr_t *mcasts, int mc_num, bool drv_only) { - struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf); - struct bnx2x_mcast_ramrod_params *mcast = &vfop->op_p->mcast; - struct bnx2x_raw_obj *raw = &mcast->mcast_obj->raw; - struct bnx2x_vfop_args_mcast *args = &vfop->args.mc_list; - enum bnx2x_vfop_mcast_state state = vfop->state; - int i; - - bnx2x_vfop_reset_wq(vf); - - if (vfop->rc < 0) - goto op_err; + struct bnx2x_mcast_list_elem *mc = NULL; + struct bnx2x_mcast_ramrod_params mcast; + int rc, i; - DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state); - - switch (state) { - case BNX2X_VFOP_MCAST_DEL: - /* clear existing mcasts */ - vfop->state = (args->mc_num) ? 
BNX2X_VFOP_MCAST_ADD
-					     : BNX2X_VFOP_MCAST_CHK_DONE;
-		mcast->mcast_list_len = vf->mcast_list_len;
-		vf->mcast_list_len = args->mc_num;
-		vfop->rc = bnx2x_config_mcast(bp, mcast, BNX2X_MCAST_CMD_DEL);
-		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
-
-	case BNX2X_VFOP_MCAST_ADD:
-		if (raw->check_pending(raw))
-			goto op_pending;
-
-		/* update mcast list on the ramrod params */
-		INIT_LIST_HEAD(&mcast->mcast_list);
-		for (i = 0; i < args->mc_num; i++)
-			list_add_tail(&(args->mc[i].link),
-				      &mcast->mcast_list);
-		mcast->mcast_list_len = args->mc_num;
+	DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
 
-		/* add new mcasts */
-		vfop->state = BNX2X_VFOP_MCAST_CHK_DONE;
-		vfop->rc = bnx2x_config_mcast(bp, mcast,
-					      BNX2X_MCAST_CMD_ADD);
-		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
-
-	case BNX2X_VFOP_MCAST_CHK_DONE:
-		vfop->rc = raw->check_pending(raw) ? 1 : 0;
-		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
-	default:
-		bnx2x_vfop_default(state);
+	/* Prepare Multicast command */
+	memset(&mcast, 0, sizeof(struct bnx2x_mcast_ramrod_params));
+	mcast.mcast_obj = &vf->mcast_obj;
+	if (drv_only)
+		set_bit(RAMROD_DRV_CLR_ONLY, &mcast.ramrod_flags);
+	else
+		set_bit(RAMROD_COMP_WAIT, &mcast.ramrod_flags);
+	if (mc_num) {
+		mc = kzalloc(mc_num * sizeof(struct bnx2x_mcast_list_elem),
+			     GFP_KERNEL);
+		if (!mc) {
+			BNX2X_ERR("Cannot Configure multicasts due to lack of memory\n");
+			return -ENOMEM;
+		}
 	}
-op_err:
-	BNX2X_ERR("MCAST CONFIG error: rc %d\n", vfop->rc);
-op_done:
-	kfree(args->mc);
-	bnx2x_vfop_end(bp, vf, vfop);
-op_pending:
-	return;
-}
 
-int bnx2x_vfop_mcast_cmd(struct bnx2x *bp,
-			 struct bnx2x_virtf *vf,
-			 struct bnx2x_vfop_cmd *cmd,
-			 bnx2x_mac_addr_t *mcasts,
-			 int mcast_num, bool drv_only)
-{
-	struct bnx2x_vfop *vfop = NULL;
-	size_t mc_sz = mcast_num * sizeof(struct bnx2x_mcast_list_elem);
-	struct bnx2x_mcast_list_elem *mc = mc_sz ? kzalloc(mc_sz, GFP_KERNEL) :
-						   NULL;
-
-	if (!mc_sz || mc) {
-		vfop = bnx2x_vfop_add(bp, vf);
-		if (vfop) {
-			int i;
-			struct bnx2x_mcast_ramrod_params *ramrod =
-				&vf->op_params.mcast;
-
-			/* set ramrod params */
-			memset(ramrod, 0, sizeof(*ramrod));
-			ramrod->mcast_obj = &vf->mcast_obj;
-			if (drv_only)
-				set_bit(RAMROD_DRV_CLR_ONLY,
-					&ramrod->ramrod_flags);
-
-			/* copy mcasts pointers */
-			vfop->args.mc_list.mc_num = mcast_num;
-			vfop->args.mc_list.mc = mc;
-			for (i = 0; i < mcast_num; i++)
-				mc[i].mac = mcasts[i];
-
-			bnx2x_vfop_opset(BNX2X_VFOP_MCAST_DEL,
-					 bnx2x_vfop_mcast, cmd->done);
-			return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_mcast,
-						     cmd->block);
-		} else {
+	/* clear existing mcasts */
+	mcast.mcast_list_len = vf->mcast_list_len;
+	vf->mcast_list_len = mc_num;
+	rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_DEL);
+	if (rc) {
+		BNX2X_ERR("Failed to remove multicasts\n");
+		if (mc)
 			kfree(mc);
-		}
+		return rc;
 	}
-	return -ENOMEM;
-}
-
-/* VFOP rx-mode */
-static void bnx2x_vfop_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf)
-{
-	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
-	struct bnx2x_rx_mode_ramrod_params *ramrod = &vfop->op_p->rx_mode;
-	enum bnx2x_vfop_rxmode_state state = vfop->state;
-
-	bnx2x_vfop_reset_wq(vf);
-
-	if (vfop->rc < 0)
-		goto op_err;
-	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
-
-	switch (state) {
-	case BNX2X_VFOP_RXMODE_CONFIG:
-		/* next state */
-		vfop->state = BNX2X_VFOP_RXMODE_DONE;
+	/* update mcast list on the ramrod params */
+	if (mc_num) {
+		INIT_LIST_HEAD(&mcast.mcast_list);
+		for (i = 0; i < mc_num; i++) {
+			mc[i].mac = mcasts[i];
+			list_add_tail(&mc[i].link,
+				      &mcast.mcast_list);
+		}
 
-		/* record the accept flags in vfdb so hypervisor can modify them
-		 * if necessary
-		 */
-		bnx2x_vfq(vf, ramrod->cl_id - vf->igu_base_id, accept_flags) =
-			ramrod->rx_accept_flags;
-		vfop->rc = bnx2x_config_rx_mode(bp, ramrod);
-		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
-op_err:
-		BNX2X_ERR("RXMODE error: rc %d\n", vfop->rc);
-op_done:
-	case BNX2X_VFOP_RXMODE_DONE:
-		bnx2x_vfop_end(bp, vf, vfop);
-		return;
-	default:
-		bnx2x_vfop_default(state);
+		/* add new mcasts */
+		rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_ADD);
+		if (rc)
+			BNX2X_ERR("Failed to add multicasts\n");
+		kfree(mc);
 	}
-op_pending:
-	return;
+
+	return rc;
 }
 
 static void bnx2x_vf_prep_rx_mode(struct bnx2x *bp, u8 qid,
@@ -1268,118 +644,56 @@ static void bnx2x_vf_prep_rx_mode(struct bnx2x *bp, u8 qid,
 	ramrod->rdata_mapping = bnx2x_vf_sp_map(bp, vf, rx_mode_rdata.e2);
 }
 
-int bnx2x_vfop_rxmode_cmd(struct bnx2x *bp,
-			  struct bnx2x_virtf *vf,
-			  struct bnx2x_vfop_cmd *cmd,
-			  int qid, unsigned long accept_flags)
+int bnx2x_vf_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf,
+		    int qid, unsigned long accept_flags)
 {
-	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
-
-	if (vfop) {
-		struct bnx2x_rx_mode_ramrod_params *ramrod =
-			&vf->op_params.rx_mode;
+	struct bnx2x_rx_mode_ramrod_params ramrod;
 
-		bnx2x_vf_prep_rx_mode(bp, qid, ramrod, vf, accept_flags);
+	DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
 
-		bnx2x_vfop_opset(BNX2X_VFOP_RXMODE_CONFIG,
-				 bnx2x_vfop_rxmode, cmd->done);
-		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_rxmode,
-					     cmd->block);
-	}
-	return -ENOMEM;
+	bnx2x_vf_prep_rx_mode(bp, qid, &ramrod, vf, accept_flags);
+	set_bit(RAMROD_COMP_WAIT, &ramrod.ramrod_flags);
+	vfq_get(vf, qid)->accept_flags = ramrod.rx_accept_flags;
+	return bnx2x_config_rx_mode(bp, &ramrod);
}
 
-/* VFOP queue tear-down ('drop all' rx-mode, clear vlans, clear macs,
- *
queue destructor) - */ -static void bnx2x_vfop_qdown(struct bnx2x *bp, struct bnx2x_virtf *vf) +int bnx2x_vf_queue_teardown(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid) { - struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf); - int qid = vfop->args.qx.qid; - enum bnx2x_vfop_qteardown_state state = vfop->state; - struct bnx2x_vfop_cmd cmd; - - if (vfop->rc < 0) - goto op_err; - - DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state); - - cmd.done = bnx2x_vfop_qdown; - cmd.block = false; - - switch (state) { - case BNX2X_VFOP_QTEARDOWN_RXMODE: - /* Drop all */ - vfop->state = BNX2X_VFOP_QTEARDOWN_CLR_VLAN; - vfop->rc = bnx2x_vfop_rxmode_cmd(bp, vf, &cmd, qid, 0); - if (vfop->rc) - goto op_err; - return; - - case BNX2X_VFOP_QTEARDOWN_CLR_VLAN: - /* vlan-clear-all: don't consume credit */ - vfop->state = BNX2X_VFOP_QTEARDOWN_CLR_MAC; - vfop->rc = bnx2x_vfop_vlan_delall_cmd(bp, vf, &cmd, qid, false); - if (vfop->rc) - goto op_err; - return; - - case BNX2X_VFOP_QTEARDOWN_CLR_MAC: - /* mac-clear-all: consume credit */ - vfop->state = BNX2X_VFOP_QTEARDOWN_CLR_MCAST; - vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd, qid, false); - if (vfop->rc) - goto op_err; - return; + int rc; - case BNX2X_VFOP_QTEARDOWN_CLR_MCAST: - vfop->state = BNX2X_VFOP_QTEARDOWN_QDTOR; - vfop->rc = bnx2x_vfop_mcast_cmd(bp, vf, &cmd, NULL, 0, false); - if (vfop->rc) - goto op_err; - return; + DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid); - case BNX2X_VFOP_QTEARDOWN_QDTOR: - /* run the queue destruction flow */ - DP(BNX2X_MSG_IOV, "case: BNX2X_VFOP_QTEARDOWN_QDTOR\n"); - vfop->state = BNX2X_VFOP_QTEARDOWN_DONE; - DP(BNX2X_MSG_IOV, "new state: BNX2X_VFOP_QTEARDOWN_DONE\n"); - vfop->rc = bnx2x_vfop_qdtor_cmd(bp, vf, &cmd, qid); - DP(BNX2X_MSG_IOV, "returned from cmd\n"); - if (vfop->rc) + /* Remove all classification configuration for leading queue */ + if (qid == LEADING_IDX) { + rc = bnx2x_vf_rxmode(bp, vf, qid, 0); + if (rc) goto op_err; - return; -op_err: - BNX2X_ERR("QTEARDOWN[%d:%d] error: rc %d\n", - vf->abs_vfid, qid, vfop->rc); - - case BNX2X_VFOP_QTEARDOWN_DONE: - bnx2x_vfop_end(bp, vf, vfop); - return; - default: - bnx2x_vfop_default(state); - } -} -int bnx2x_vfop_qdown_cmd(struct bnx2x *bp, - struct bnx2x_virtf *vf, - struct bnx2x_vfop_cmd *cmd, - int qid) -{ - struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); - - /* for non leading queues skip directly to qdown sate */ - if (vfop) { - vfop->args.qx.qid = qid; - bnx2x_vfop_opset(qid == LEADING_IDX ? 
- BNX2X_VFOP_QTEARDOWN_RXMODE : - BNX2X_VFOP_QTEARDOWN_QDTOR, bnx2x_vfop_qdown, - cmd->done); - return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qdown, - cmd->block); + /* Remove filtering if feasible */ + if (bnx2x_validate_vf_sp_objs(bp, vf, true)) { + rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, + false, false); + if (rc) + goto op_err; + rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, + false, true); + if (rc) + goto op_err; + rc = bnx2x_vf_mcast(bp, vf, NULL, 0, false); + if (rc) + goto op_err; + } } - return -ENOMEM; + /* Destroy queue */ + rc = bnx2x_vf_queue_destroy(bp, vf, qid); + if (rc) + goto op_err; + return rc; +op_err: + BNX2X_ERR("vf[%d:%d] error: rc %d\n", + vf->abs_vfid, qid, rc); + return rc; } /* VF enable primitives @@ -1579,120 +893,63 @@ static void bnx2x_vf_flr_clnup_hw(struct bnx2x *bp, struct bnx2x_virtf *vf) bnx2x_tx_hw_flushed(bp, poll_cnt); } -static void bnx2x_vfop_flr(struct bnx2x *bp, struct bnx2x_virtf *vf) +static void bnx2x_vf_flr(struct bnx2x *bp, struct bnx2x_virtf *vf) { - struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf); - struct bnx2x_vfop_args_qx *qx = &vfop->args.qx; - enum bnx2x_vfop_flr_state state = vfop->state; - struct bnx2x_vfop_cmd cmd = { - .done = bnx2x_vfop_flr, - .block = false, - }; - - if (vfop->rc < 0) - goto op_err; - - DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state); + int rc, i; - switch (state) { - case BNX2X_VFOP_FLR_QUEUES: - /* the cleanup operations are valid if and only if the VF - * was first acquired. - */ - if (++(qx->qid) < vf_rxq_count(vf)) { - vfop->rc = bnx2x_vfop_qflr_cmd(bp, vf, &cmd, - qx->qid); - if (vfop->rc) - goto op_err; - return; - } - /* remove multicasts */ - vfop->state = BNX2X_VFOP_FLR_HW; - vfop->rc = bnx2x_vfop_mcast_cmd(bp, vf, &cmd, NULL, - 0, true); - if (vfop->rc) - goto op_err; - return; - case BNX2X_VFOP_FLR_HW: + DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid); - /* dispatch final cleanup and wait for HW queues to flush */ - bnx2x_vf_flr_clnup_hw(bp, vf); + /* the cleanup operations are valid if and only if the VF + * was first acquired. + */ + for (i = 0; i < vf_rxq_count(vf); i++) { + rc = bnx2x_vf_queue_flr(bp, vf, i); + if (rc) + goto out; + } - /* release VF resources */ - bnx2x_vf_free_resc(bp, vf); + /* remove multicasts */ + bnx2x_vf_mcast(bp, vf, NULL, 0, true); - /* re-open the mailbox */ - bnx2x_vf_enable_mbx(bp, vf->abs_vfid); + /* dispatch final cleanup and wait for HW queues to flush */ + bnx2x_vf_flr_clnup_hw(bp, vf); - goto op_done; - default: - bnx2x_vfop_default(state); - } -op_err: - BNX2X_ERR("VF[%d] FLR error: rc %d\n", vf->abs_vfid, vfop->rc); -op_done: - vf->flr_clnup_stage = VF_FLR_ACK; - bnx2x_vfop_end(bp, vf, vfop); - bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR); -} + /* release VF resources */ + bnx2x_vf_free_resc(bp, vf); -static int bnx2x_vfop_flr_cmd(struct bnx2x *bp, - struct bnx2x_virtf *vf, - vfop_handler_t done) -{ - struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); - if (vfop) { - vfop->args.qx.qid = -1; /* loop */ - bnx2x_vfop_opset(BNX2X_VFOP_FLR_QUEUES, - bnx2x_vfop_flr, done); - return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_flr, false); - } - return -ENOMEM; + /* re-open the mailbox */ + bnx2x_vf_enable_mbx(bp, vf->abs_vfid); + return; +out: + BNX2X_ERR("vf[%d:%d] failed flr: rc %d\n", + vf->abs_vfid, i, rc); } -static void bnx2x_vf_flr_clnup(struct bnx2x *bp, struct bnx2x_virtf *prev_vf) +static void bnx2x_vf_flr_clnup(struct bnx2x *bp) { - int i = prev_vf ? 
prev_vf->index + 1 : 0; struct bnx2x_virtf *vf; + int i; - /* find next VF to cleanup */ -next_vf_to_clean: - for (; - i < BNX2X_NR_VIRTFN(bp) && - (bnx2x_vf(bp, i, state) != VF_RESET || - bnx2x_vf(bp, i, flr_clnup_stage) != VF_FLR_CLN); - i++) - ; + for (i = 0; i < BNX2X_NR_VIRTFN(bp); i++) { + /* VF should be RESET & in FLR cleanup states */ + if (bnx2x_vf(bp, i, state) != VF_RESET || + !bnx2x_vf(bp, i, flr_clnup_stage)) + continue; - DP(BNX2X_MSG_IOV, "next vf to cleanup: %d. Num of vfs: %d\n", i, - BNX2X_NR_VIRTFN(bp)); + DP(BNX2X_MSG_IOV, "next vf to cleanup: %d. Num of vfs: %d\n", + i, BNX2X_NR_VIRTFN(bp)); - if (i < BNX2X_NR_VIRTFN(bp)) { vf = BP_VF(bp, i); /* lock the vf pf channel */ bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR); /* invoke the VF FLR SM */ - if (bnx2x_vfop_flr_cmd(bp, vf, bnx2x_vf_flr_clnup)) { - BNX2X_ERR("VF[%d]: FLR cleanup failed -ENOMEM\n", - vf->abs_vfid); + bnx2x_vf_flr(bp, vf); - /* mark the VF to be ACKED and continue */ - vf->flr_clnup_stage = VF_FLR_ACK; - goto next_vf_to_clean; - } - return; - } - - /* we are done, update vf records */ - for_each_vf(bp, i) { - vf = BP_VF(bp, i); - - if (vf->flr_clnup_stage != VF_FLR_ACK) - continue; - - vf->flr_clnup_stage = VF_FLR_EPILOG; + /* mark the VF to be ACKED and continue */ + vf->flr_clnup_stage = false; + bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR); } /* Acknowledge the handled VFs. @@ -1742,7 +999,7 @@ void bnx2x_vf_handle_flr_event(struct bnx2x *bp) if (reset) { /* set as reset and ready for cleanup */ vf->state = VF_RESET; - vf->flr_clnup_stage = VF_FLR_CLN; + vf->flr_clnup_stage = true; DP(BNX2X_MSG_IOV, "Initiating Final cleanup for VF %d\n", @@ -1751,7 +1008,7 @@ void bnx2x_vf_handle_flr_event(struct bnx2x *bp) } /* do the FLR cleanup for all marked VFs*/ - bnx2x_vf_flr_clnup(bp, NULL); + bnx2x_vf_flr_clnup(bp); } /* IOV global initialization routines */ @@ -2018,7 +1275,6 @@ int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param, bnx2x_vf(bp, i, index) = i; bnx2x_vf(bp, i, abs_vfid) = iov->first_vf_in_pf + i; bnx2x_vf(bp, i, state) = VF_FREE; - INIT_LIST_HEAD(&bnx2x_vf(bp, i, op_list_head)); mutex_init(&bnx2x_vf(bp, i, op_mutex)); bnx2x_vf(bp, i, op_current) = CHANNEL_TLV_NONE; } @@ -2039,6 +1295,9 @@ int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param, goto failed; } + /* Prepare the VFs event synchronization mechanism */ + mutex_init(&bp->vfdb->event_mutex); + return 0; failed: DP(BNX2X_MSG_IOV, "Failed err=%d\n", err); @@ -2117,7 +1376,9 @@ int bnx2x_iov_alloc_mem(struct bnx2x *bp) cxt->size = min_t(size_t, tot_size, CDU_ILT_PAGE_SZ); if (cxt->size) { - BNX2X_PCI_ALLOC(cxt->addr, &cxt->mapping, cxt->size); + cxt->addr = BNX2X_PCI_ALLOC(&cxt->mapping, cxt->size); + if (!cxt->addr) + goto alloc_mem_err; } else { cxt->addr = NULL; cxt->mapping = 0; @@ -2127,20 +1388,28 @@ int bnx2x_iov_alloc_mem(struct bnx2x *bp) /* allocate vfs ramrods dma memory - client_init and set_mac */ tot_size = BNX2X_NR_VIRTFN(bp) * sizeof(struct bnx2x_vf_sp); - BNX2X_PCI_ALLOC(BP_VFDB(bp)->sp_dma.addr, &BP_VFDB(bp)->sp_dma.mapping, - tot_size); + BP_VFDB(bp)->sp_dma.addr = BNX2X_PCI_ALLOC(&BP_VFDB(bp)->sp_dma.mapping, + tot_size); + if (!BP_VFDB(bp)->sp_dma.addr) + goto alloc_mem_err; BP_VFDB(bp)->sp_dma.size = tot_size; /* allocate mailboxes */ tot_size = BNX2X_NR_VIRTFN(bp) * MBX_MSG_ALIGNED_SIZE; - BNX2X_PCI_ALLOC(BP_VF_MBX_DMA(bp)->addr, &BP_VF_MBX_DMA(bp)->mapping, - tot_size); + BP_VF_MBX_DMA(bp)->addr = BNX2X_PCI_ALLOC(&BP_VF_MBX_DMA(bp)->mapping, + tot_size); + if (!BP_VF_MBX_DMA(bp)->addr) + 
goto alloc_mem_err; + BP_VF_MBX_DMA(bp)->size = tot_size; /* allocate local bulletin boards */ tot_size = BNX2X_NR_VIRTFN(bp) * BULLETIN_CONTENT_SIZE; - BNX2X_PCI_ALLOC(BP_VF_BULLETIN_DMA(bp)->addr, - &BP_VF_BULLETIN_DMA(bp)->mapping, tot_size); + BP_VF_BULLETIN_DMA(bp)->addr = BNX2X_PCI_ALLOC(&BP_VF_BULLETIN_DMA(bp)->mapping, + tot_size); + if (!BP_VF_BULLETIN_DMA(bp)->addr) + goto alloc_mem_err; + BP_VF_BULLETIN_DMA(bp)->size = tot_size; return 0; @@ -2166,6 +1435,9 @@ static void bnx2x_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf, bnx2x_vf_sp_map(bp, vf, q_data), q_type); + /* sp indication is set only when vlan/mac/etc. are initialized */ + q->sp_initialized = false; + DP(BNX2X_MSG_IOV, "initialized vf %d's queue object. func id set to %d. cid set to 0x%x\n", vf->abs_vfid, q->sp_obj.func_id, q->cid); @@ -2269,7 +1541,7 @@ int bnx2x_iov_chip_cleanup(struct bnx2x *bp) /* release all the VFs */ for_each_vf(bp, i) - bnx2x_vf_release(bp, BP_VF(bp, i), true); /* blocking */ + bnx2x_vf_release(bp, BP_VF(bp, i)); return 0; } @@ -2359,6 +1631,12 @@ void bnx2x_vf_handle_filters_eqe(struct bnx2x *bp, smp_mb__after_clear_bit(); } +static void bnx2x_vf_handle_rss_update_eqe(struct bnx2x *bp, + struct bnx2x_virtf *vf) +{ + vf->rss_conf_obj.raw.clear_pending(&vf->rss_conf_obj.raw); +} + int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem) { struct bnx2x_virtf *vf; @@ -2383,6 +1661,7 @@ int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem) case EVENT_RING_OPCODE_CLASSIFICATION_RULES: case EVENT_RING_OPCODE_MULTICAST_RULES: case EVENT_RING_OPCODE_FILTERS_RULES: + case EVENT_RING_OPCODE_RSS_UPDATE_RULES: cid = (elem->message.data.eth_event.echo & BNX2X_SWCID_MASK); DP(BNX2X_MSG_IOV, "checking filtering comp cid=%d\n", cid); @@ -2447,13 +1726,15 @@ get_vf: vf->abs_vfid, qidx); bnx2x_vf_handle_filters_eqe(bp, vf); break; + case EVENT_RING_OPCODE_RSS_UPDATE_RULES: + DP(BNX2X_MSG_IOV, "got VF [%d:%d] RSS update ramrod\n", + vf->abs_vfid, qidx); + bnx2x_vf_handle_rss_update_eqe(bp, vf); case EVENT_RING_OPCODE_VF_FLR: case EVENT_RING_OPCODE_MALICIOUS_VF: /* Do nothing for now */ return 0; } - /* SRIOV: reschedule any 'in_progress' operations */ - bnx2x_iov_sp_event(bp, cid, false); return 0; } @@ -2490,23 +1771,6 @@ void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid, } } -void bnx2x_iov_sp_event(struct bnx2x *bp, int vf_cid, bool queue_work) -{ - struct bnx2x_virtf *vf; - - /* check if the cid is the VF range */ - if (!IS_SRIOV(bp) || !bnx2x_iov_is_vf_cid(bp, vf_cid)) - return; - - vf = bnx2x_vf_by_cid(bp, vf_cid); - if (vf) { - /* set in_progress flag */ - atomic_set(&vf->op_in_progress, 1); - if (queue_work) - queue_delayed_work(bnx2x_wq, &bp->sp_task, 0); - } -} - void bnx2x_iov_adjust_stats_req(struct bnx2x *bp) { int i; @@ -2527,10 +1791,10 @@ void bnx2x_iov_adjust_stats_req(struct bnx2x *bp) first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX - (is_fcoe ? 0 : 1); - DP(BNX2X_MSG_IOV, - "BNX2X_NUM_ETH_QUEUES %d, is_fcoe %d, first_queue_query_index %d => determined the last non virtual statistics query index is %d. Will add queries on top of that\n", - BNX2X_NUM_ETH_QUEUES(bp), is_fcoe, first_queue_query_index, - first_queue_query_index + num_queues_req); + DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS), + "BNX2X_NUM_ETH_QUEUES %d, is_fcoe %d, first_queue_query_index %d => determined the last non virtual statistics query index is %d. 
Will add queries on top of that\n", + BNX2X_NUM_ETH_QUEUES(bp), is_fcoe, first_queue_query_index, + first_queue_query_index + num_queues_req); cur_data_offset = bp->fw_stats_data_mapping + offsetof(struct bnx2x_fw_stats_data, queue_stats) + @@ -2544,9 +1808,9 @@ void bnx2x_iov_adjust_stats_req(struct bnx2x *bp) struct bnx2x_virtf *vf = BP_VF(bp, i); if (vf->state != VF_ENABLED) { - DP(BNX2X_MSG_IOV, - "vf %d not enabled so no stats for it\n", - vf->abs_vfid); + DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS), + "vf %d not enabled so no stats for it\n", + vf->abs_vfid); continue; } @@ -2588,32 +1852,6 @@ void bnx2x_iov_adjust_stats_req(struct bnx2x *bp) bp->fw_stats_req->hdr.cmd_num = bp->fw_stats_num + stats_count; } -void bnx2x_iov_sp_task(struct bnx2x *bp) -{ - int i; - - if (!IS_SRIOV(bp)) - return; - /* Iterate over all VFs and invoke state transition for VFs with - * 'in-progress' slow-path operations - */ - DP(BNX2X_MSG_IOV, "searching for pending vf operations\n"); - for_each_vf(bp, i) { - struct bnx2x_virtf *vf = BP_VF(bp, i); - - if (!vf) { - BNX2X_ERR("VF was null! skipping...\n"); - continue; - } - - if (!list_empty(&vf->op_list_head) && - atomic_read(&vf->op_in_progress)) { - DP(BNX2X_MSG_IOV, "running pending op for vf %d\n", i); - bnx2x_vfop_cur(bp, vf)->transition(bp, vf); - } - } -} - static inline struct bnx2x_virtf *__vf_from_stat_id(struct bnx2x *bp, u8 stat_id) { @@ -2849,52 +2087,26 @@ static void bnx2x_set_vf_state(void *cookie) p->vf->state = p->state; } -/* VFOP close (teardown the queues, delete mcasts and close HW) */ -static void bnx2x_vfop_close(struct bnx2x *bp, struct bnx2x_virtf *vf) +int bnx2x_vf_close(struct bnx2x *bp, struct bnx2x_virtf *vf) { - struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf); - struct bnx2x_vfop_args_qx *qx = &vfop->args.qx; - enum bnx2x_vfop_close_state state = vfop->state; - struct bnx2x_vfop_cmd cmd = { - .done = bnx2x_vfop_close, - .block = false, - }; + int rc = 0, i; - if (vfop->rc < 0) - goto op_err; - - DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state); - - switch (state) { - case BNX2X_VFOP_CLOSE_QUEUES: - - if (++(qx->qid) < vf_rxq_count(vf)) { - vfop->rc = bnx2x_vfop_qdown_cmd(bp, vf, &cmd, qx->qid); - if (vfop->rc) - goto op_err; - return; - } - vfop->state = BNX2X_VFOP_CLOSE_HW; - vfop->rc = 0; - bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); + DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid); - case BNX2X_VFOP_CLOSE_HW: - - /* disable the interrupts */ - DP(BNX2X_MSG_IOV, "disabling igu\n"); - bnx2x_vf_igu_disable(bp, vf); + /* Close all queues */ + for (i = 0; i < vf_rxq_count(vf); i++) { + rc = bnx2x_vf_queue_teardown(bp, vf, i); + if (rc) + goto op_err; + } - /* disable the VF */ - DP(BNX2X_MSG_IOV, "clearing qtbl\n"); - bnx2x_vf_clr_qtbl(bp, vf); + /* disable the interrupts */ + DP(BNX2X_MSG_IOV, "disabling igu\n"); + bnx2x_vf_igu_disable(bp, vf); - goto op_done; - default: - bnx2x_vfop_default(state); - } -op_err: - BNX2X_ERR("VF[%d] CLOSE error: rc %d\n", vf->abs_vfid, vfop->rc); -op_done: + /* disable the VF */ + DP(BNX2X_MSG_IOV, "clearing qtbl\n"); + bnx2x_vf_clr_qtbl(bp, vf); /* need to make sure there are no outstanding stats ramrods which may * cause the device to access the VF's stats buffer which it will free @@ -2909,43 +2121,20 @@ op_done: } DP(BNX2X_MSG_IOV, "set state to acquired\n"); - bnx2x_vfop_end(bp, vf, vfop); -op_pending: - /* Not supported at the moment; Exists for macros only */ - return; -} -int bnx2x_vfop_close_cmd(struct bnx2x *bp, - struct bnx2x_virtf *vf, - struct bnx2x_vfop_cmd *cmd) -{ - 
struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); - if (vfop) { - vfop->args.qx.qid = -1; /* loop */ - bnx2x_vfop_opset(BNX2X_VFOP_CLOSE_QUEUES, - bnx2x_vfop_close, cmd->done); - return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_close, - cmd->block); - } - return -ENOMEM; + return 0; +op_err: + BNX2X_ERR("vf[%d] CLOSE error: rc %d\n", vf->abs_vfid, rc); + return rc; } /* VF release can be called either: 1. The VF was acquired but * not enabled 2. the vf was enabled or in the process of being * enabled */ -static void bnx2x_vfop_release(struct bnx2x *bp, struct bnx2x_virtf *vf) +int bnx2x_vf_free(struct bnx2x *bp, struct bnx2x_virtf *vf) { - struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf); - struct bnx2x_vfop_cmd cmd = { - .done = bnx2x_vfop_release, - .block = false, - }; - - DP(BNX2X_MSG_IOV, "vfop->rc %d\n", vfop->rc); - - if (vfop->rc < 0) - goto op_err; + int rc; DP(BNX2X_MSG_IOV, "VF[%d] STATE: %s\n", vf->abs_vfid, vf->state == VF_FREE ? "Free" : @@ -2956,116 +2145,87 @@ static void bnx2x_vfop_release(struct bnx2x *bp, struct bnx2x_virtf *vf) switch (vf->state) { case VF_ENABLED: - vfop->rc = bnx2x_vfop_close_cmd(bp, vf, &cmd); - if (vfop->rc) + rc = bnx2x_vf_close(bp, vf); + if (rc) goto op_err; - return; - + /* Fallthrough to release resources */ case VF_ACQUIRED: DP(BNX2X_MSG_IOV, "about to free resources\n"); bnx2x_vf_free_resc(bp, vf); - DP(BNX2X_MSG_IOV, "vfop->rc %d\n", vfop->rc); - goto op_done; + break; case VF_FREE: case VF_RESET: - /* do nothing */ - goto op_done; default: - bnx2x_vfop_default(vf->state); + break; } + return 0; op_err: - BNX2X_ERR("VF[%d] RELEASE error: rc %d\n", vf->abs_vfid, vfop->rc); -op_done: - bnx2x_vfop_end(bp, vf, vfop); + BNX2X_ERR("VF[%d] RELEASE error: rc %d\n", vf->abs_vfid, rc); + return rc; } -static void bnx2x_vfop_rss(struct bnx2x *bp, struct bnx2x_virtf *vf) +int bnx2x_vf_rss_update(struct bnx2x *bp, struct bnx2x_virtf *vf, + struct bnx2x_config_rss_params *rss) { - struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf); - enum bnx2x_vfop_rss_state state; - - if (!vfop) { - BNX2X_ERR("vfop was null\n"); - return; - } - - state = vfop->state; - bnx2x_vfop_reset_wq(vf); - - if (vfop->rc < 0) - goto op_err; - - DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state); - - switch (state) { - case BNX2X_VFOP_RSS_CONFIG: - /* next state */ - vfop->state = BNX2X_VFOP_RSS_DONE; - bnx2x_config_rss(bp, &vfop->op_p->rss); - bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE); -op_err: - BNX2X_ERR("RSS error: rc %d\n", vfop->rc); -op_done: - case BNX2X_VFOP_RSS_DONE: - bnx2x_vfop_end(bp, vf, vfop); - return; - default: - bnx2x_vfop_default(state); - } -op_pending: - return; + DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid); + set_bit(RAMROD_COMP_WAIT, &rss->ramrod_flags); + return bnx2x_config_rss(bp, rss); } -int bnx2x_vfop_release_cmd(struct bnx2x *bp, - struct bnx2x_virtf *vf, - struct bnx2x_vfop_cmd *cmd) +int bnx2x_vf_tpa_update(struct bnx2x *bp, struct bnx2x_virtf *vf, + struct vfpf_tpa_tlv *tlv, + struct bnx2x_queue_update_tpa_params *params) { - struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); - if (vfop) { - bnx2x_vfop_opset(-1, /* use vf->state */ - bnx2x_vfop_release, cmd->done); - return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_release, - cmd->block); - } - return -ENOMEM; -} + aligned_u64 *sge_addr = tlv->tpa_client_info.sge_addr; + struct bnx2x_queue_state_params qstate; + int qid, rc = 0; -int bnx2x_vfop_rss_cmd(struct bnx2x *bp, - struct bnx2x_virtf *vf, - struct bnx2x_vfop_cmd *cmd) -{ - struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); + 
DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid); + + /* Set ramrod params */ + memset(&qstate, 0, sizeof(struct bnx2x_queue_state_params)); + memcpy(&qstate.params.update_tpa, params, + sizeof(struct bnx2x_queue_update_tpa_params)); + qstate.cmd = BNX2X_Q_CMD_UPDATE_TPA; + set_bit(RAMROD_COMP_WAIT, &qstate.ramrod_flags); - if (vfop) { - bnx2x_vfop_opset(BNX2X_VFOP_RSS_CONFIG, bnx2x_vfop_rss, - cmd->done); - return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_rss, - cmd->block); + for (qid = 0; qid < vf_rxq_count(vf); qid++) { + qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj); + qstate.params.update_tpa.sge_map = sge_addr[qid]; + DP(BNX2X_MSG_IOV, "sge_addr[%d:%d] %08x:%08x\n", + vf->abs_vfid, qid, U64_HI(sge_addr[qid]), + U64_LO(sge_addr[qid])); + rc = bnx2x_queue_state_change(bp, &qstate); + if (rc) { + BNX2X_ERR("Failed to configure sge_addr %08x:%08x for [%d:%d]\n", + U64_HI(sge_addr[qid]), U64_LO(sge_addr[qid]), + vf->abs_vfid, qid); + return rc; + } } - return -ENOMEM; + + return rc; } /* VF release ~ VF close + VF release-resources * Release is the ultimate SW shutdown and is called whenever an * irrecoverable error is encountered. */ -void bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf, bool block) +int bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf) { - struct bnx2x_vfop_cmd cmd = { - .done = NULL, - .block = block, - }; int rc; DP(BNX2X_MSG_IOV, "PF releasing vf %d\n", vf->abs_vfid); bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF); - rc = bnx2x_vfop_release_cmd(bp, vf, &cmd); + rc = bnx2x_vf_free(bp, vf); if (rc) WARN(rc, "VF[%d] Failed to allocate resources for release op- rc=%d\n", vf->abs_vfid, rc); + bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF); + return rc; } static inline void bnx2x_vf_get_sbdf(struct bnx2x *bp, @@ -3074,16 +2234,6 @@ static inline void bnx2x_vf_get_sbdf(struct bnx2x *bp, *sbdf = vf->devfn | (vf->bus << 8); } -static inline void bnx2x_vf_get_bars(struct bnx2x *bp, struct bnx2x_virtf *vf, - struct bnx2x_vf_bar_info *bar_info) -{ - int n; - - bar_info->nr_bars = bp->vfdb->sriov.nres; - for (n = 0; n < bar_info->nr_bars; n++) - bar_info->bars[n] = vf->bars[n]; -} - void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf, enum channel_tlvs tlv) { @@ -3405,13 +2555,13 @@ int bnx2x_get_vf_config(struct net_device *dev, int vfidx, ivi->spoofchk = 1; /*always enabled */ if (vf->state == VF_ENABLED) { /* mac and vlan are in vlan_mac objects */ - if (validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj))) + if (bnx2x_validate_vf_sp_objs(bp, vf, false)) { mac_obj->get_n_elements(bp, mac_obj, 1, (u8 *)&ivi->mac, 0, ETH_ALEN); - if (validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, vlan_obj))) vlan_obj->get_n_elements(bp, vlan_obj, 1, (u8 *)&ivi->vlan, 0, VLAN_HLEN); + } } else { /* mac */ if (bulletin->valid_bitmap & (1 << MAC_ADDR_VALID)) @@ -3485,17 +2635,17 @@ int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac) q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) { /* configure the mac in device on this vf's queue */ unsigned long ramrod_flags = 0; - struct bnx2x_vlan_mac_obj *mac_obj = - &bnx2x_leading_vfq(vf, mac_obj); + struct bnx2x_vlan_mac_obj *mac_obj; - rc = validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj)); - if (rc) - return rc; + /* User should be able to see failure reason in system logs */ + if (!bnx2x_validate_vf_sp_objs(bp, vf, true)) + return -EINVAL; /* must lock vfpf channel to protect against vf flows */ bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC); /* remove existing eth macs 
*/ + mac_obj = &bnx2x_leading_vfq(vf, mac_obj); rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_ETH_MAC, true); if (rc) { BNX2X_ERR("failed to delete eth macs\n"); @@ -3569,17 +2719,16 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos) BNX2X_Q_LOGICAL_STATE_ACTIVE) return rc; - /* configure the vlan in device on this vf's queue */ - vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj); - rc = validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj)); - if (rc) - return rc; + /* User should be able to see error in system logs */ + if (!bnx2x_validate_vf_sp_objs(bp, vf, true)) + return -EINVAL; /* must lock vfpf channel to protect against vf flows */ bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN); /* remove existing vlans */ __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); + vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj); rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_mac_flags, &ramrod_flags); if (rc) { @@ -3736,13 +2885,9 @@ void bnx2x_timer_sriov(struct bnx2x *bp) bnx2x_sample_bulletin(bp); /* if channel is down we need to self destruct */ - if (bp->old_bulletin.valid_bitmap & 1 << CHANNEL_DOWN) { - smp_mb__before_clear_bit(); - set_bit(BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN, - &bp->sp_rtnl_state); - smp_mb__after_clear_bit(); - schedule_delayed_work(&bp->sp_rtnl_task, 0); - } + if (bp->old_bulletin.valid_bitmap & 1 << CHANNEL_DOWN) + bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN, + BNX2X_MSG_IOV); } void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp) @@ -3756,12 +2901,16 @@ int bnx2x_vf_pci_alloc(struct bnx2x *bp) mutex_init(&bp->vf2pf_mutex); /* allocate vf2pf mailbox for vf to pf channel */ - BNX2X_PCI_ALLOC(bp->vf2pf_mbox, &bp->vf2pf_mbox_mapping, - sizeof(struct bnx2x_vf_mbx_msg)); + bp->vf2pf_mbox = BNX2X_PCI_ALLOC(&bp->vf2pf_mbox_mapping, + sizeof(struct bnx2x_vf_mbx_msg)); + if (!bp->vf2pf_mbox) + goto alloc_mem_err; /* allocate pf 2 vf bulletin board */ - BNX2X_PCI_ALLOC(bp->pf2vf_bulletin, &bp->pf2vf_bulletin_mapping, - sizeof(union pf_vf_bulletin)); + bp->pf2vf_bulletin = BNX2X_PCI_ALLOC(&bp->pf2vf_bulletin_mapping, + sizeof(union pf_vf_bulletin)); + if (!bp->pf2vf_bulletin) + goto alloc_mem_err; return 0; @@ -3792,3 +2941,28 @@ void bnx2x_iov_channel_down(struct bnx2x *bp) bnx2x_post_vf_bulletin(bp, vf_idx); } } + +void bnx2x_iov_task(struct work_struct *work) +{ + struct bnx2x *bp = container_of(work, struct bnx2x, iov_task.work); + + if (!netif_running(bp->dev)) + return; + + if (test_and_clear_bit(BNX2X_IOV_HANDLE_FLR, + &bp->iov_task_state)) + bnx2x_vf_handle_flr_event(bp); + + if (test_and_clear_bit(BNX2X_IOV_HANDLE_VF_MSG, + &bp->iov_task_state)) + bnx2x_vf_mbx(bp); +} + +void bnx2x_schedule_iov_task(struct bnx2x *bp, enum bnx2x_iov_flag flag) +{ + smp_mb__before_clear_bit(); + set_bit(flag, &bp->iov_task_state); + smp_mb__after_clear_bit(); + DP(BNX2X_MSG_IOV, "Scheduling iov task [Flag: %d]\n", flag); + queue_delayed_work(bnx2x_iov_wq, &bp->iov_task, 0); +} diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h index d9fcca1b5a9d..8bf764570eef 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h @@ -30,6 +30,8 @@ enum sample_bulletin_result { #ifdef CONFIG_BNX2X_SRIOV +extern struct workqueue_struct *bnx2x_iov_wq; + /* The bnx2x device structure holds vfdb structure described below. * The VF array is indexed by the relative vfid. 
*/ @@ -83,108 +85,35 @@ struct bnx2x_vf_queue { u16 index; u16 sb_idx; bool is_leading; + bool sp_initialized; }; -/* struct bnx2x_vfop_qctor_params - prepare queue construction parameters: - * q-init, q-setup and SB index +/* struct bnx2x_vf_queue_construct_params - prepare queue construction + * parameters: q-init, q-setup and SB index */ -struct bnx2x_vfop_qctor_params { +struct bnx2x_vf_queue_construct_params { struct bnx2x_queue_state_params qstate; struct bnx2x_queue_setup_params prep_qsetup; }; -/* VFOP parameters (one copy per VF) */ -union bnx2x_vfop_params { - struct bnx2x_vlan_mac_ramrod_params vlan_mac; - struct bnx2x_rx_mode_ramrod_params rx_mode; - struct bnx2x_mcast_ramrod_params mcast; - struct bnx2x_config_rss_params rss; - struct bnx2x_vfop_qctor_params qctor; -}; - /* forward */ struct bnx2x_virtf; /* VFOP definitions */ -typedef void (*vfop_handler_t)(struct bnx2x *bp, struct bnx2x_virtf *vf); - -struct bnx2x_vfop_cmd { - vfop_handler_t done; - bool block; -}; -/* VFOP queue filters command additional arguments */ -struct bnx2x_vfop_filter { - struct list_head link; +struct bnx2x_vf_mac_vlan_filter { int type; -#define BNX2X_VFOP_FILTER_MAC 1 -#define BNX2X_VFOP_FILTER_VLAN 2 +#define BNX2X_VF_FILTER_MAC 1 +#define BNX2X_VF_FILTER_VLAN 2 bool add; u8 *mac; u16 vid; }; -struct bnx2x_vfop_filters { - int add_cnt; - struct list_head head; - struct bnx2x_vfop_filter filters[]; -}; - -/* transient list allocated, built and saved until its - * passed to the SP-VERBs layer. - */ -struct bnx2x_vfop_args_mcast { - int mc_num; - struct bnx2x_mcast_list_elem *mc; -}; - -struct bnx2x_vfop_args_qctor { - int qid; - u16 sb_idx; -}; - -struct bnx2x_vfop_args_qdtor { - int qid; - struct eth_context *cxt; -}; - -struct bnx2x_vfop_args_defvlan { - int qid; - bool enable; - u16 vid; - u8 prio; -}; - -struct bnx2x_vfop_args_qx { - int qid; - bool en_add; -}; - -struct bnx2x_vfop_args_filters { - struct bnx2x_vfop_filters *multi_filter; - atomic_t *credit; /* non NULL means 'don't consume credit' */ -}; - -union bnx2x_vfop_args { - struct bnx2x_vfop_args_mcast mc_list; - struct bnx2x_vfop_args_qctor qctor; - struct bnx2x_vfop_args_qdtor qdtor; - struct bnx2x_vfop_args_defvlan defvlan; - struct bnx2x_vfop_args_qx qx; - struct bnx2x_vfop_args_filters filters; -}; - -struct bnx2x_vfop { - struct list_head link; - int rc; /* return code */ - int state; /* next state */ - union bnx2x_vfop_args args; /* extra arguments */ - union bnx2x_vfop_params *op_p; /* ramrod params */ - - /* state machine callbacks */ - vfop_handler_t transition; - vfop_handler_t done; +struct bnx2x_vf_mac_vlan_filters { + int count; + struct bnx2x_vf_mac_vlan_filter filters[]; }; /* vf context */ @@ -204,15 +133,7 @@ struct bnx2x_virtf { #define VF_ENABLED 2 /* VF Enabled */ #define VF_RESET 3 /* VF FLR'd, pending cleanup */ - /* non 0 during flr cleanup */ - u8 flr_clnup_stage; -#define VF_FLR_CLN 1 /* reclaim resources and do 'final cleanup' - * sans the end-wait - */ -#define VF_FLR_ACK 2 /* ACK flr notification */ -#define VF_FLR_EPILOG 3 /* wait for VF remnants to dissipate in the HW - * ~ final cleanup' end wait - */ + bool flr_clnup_stage; /* true during flr cleanup */ /* dma */ dma_addr_t fw_stat_map; /* valid iff VF_CFG_STATS */ @@ -276,11 +197,6 @@ struct bnx2x_virtf { struct bnx2x_rss_config_obj rss_conf_obj; /* slow-path operations */ - atomic_t op_in_progress; - int op_rc; - bool op_wait_blocking; - struct list_head op_list_head; - union bnx2x_vfop_params op_params; struct mutex op_mutex; /* one vfop at a time 
mutex */ enum channel_tlvs op_current; }; @@ -338,11 +254,6 @@ struct bnx2x_vf_mbx { u32 vf_addr_hi; struct vfpf_first_tlv first_tlv; /* saved VF request header */ - - u8 flags; -#define VF_MSG_INPROCESS 0x1 /* failsafe - the FW should prevent - * more then one pending msg - */ }; struct bnx2x_vf_sp { @@ -419,6 +330,10 @@ struct bnx2x_vfdb { /* the number of msix vectors belonging to this PF designated for VFs */ u16 vf_sbs_pool; u16 first_vf_igu_entry; + + /* sp_rtnl synchronization */ + struct mutex event_mutex; + u64 event_occur; }; /* queue access */ @@ -468,13 +383,13 @@ void bnx2x_iov_init_dq(struct bnx2x *bp); void bnx2x_iov_init_dmae(struct bnx2x *bp); void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid, struct bnx2x_queue_sp_obj **q_obj); -void bnx2x_iov_sp_event(struct bnx2x *bp, int vf_cid, bool queue_work); int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem); void bnx2x_iov_adjust_stats_req(struct bnx2x *bp); void bnx2x_iov_storm_stats_update(struct bnx2x *bp); -void bnx2x_iov_sp_task(struct bnx2x *bp); /* global vf mailbox routines */ -void bnx2x_vf_mbx(struct bnx2x *bp, struct vf_pf_event_data *vfpf_event); +void bnx2x_vf_mbx(struct bnx2x *bp); +void bnx2x_vf_mbx_schedule(struct bnx2x *bp, + struct vf_pf_event_data *vfpf_event); void bnx2x_vf_enable_mbx(struct bnx2x *bp, u8 abs_vfid); /* CORE VF API */ @@ -487,162 +402,6 @@ int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf, int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf, dma_addr_t *sb_map); -/* VFOP generic helpers */ -#define bnx2x_vfop_default(state) do { \ - BNX2X_ERR("Bad state %d\n", (state)); \ - vfop->rc = -EINVAL; \ - goto op_err; \ - } while (0) - -enum { - VFOP_DONE, - VFOP_CONT, - VFOP_VERIFY_PEND, -}; - -#define bnx2x_vfop_finalize(vf, rc, next) do { \ - if ((rc) < 0) \ - goto op_err; \ - else if ((rc) > 0) \ - goto op_pending; \ - else if ((next) == VFOP_DONE) \ - goto op_done; \ - else if ((next) == VFOP_VERIFY_PEND) \ - BNX2X_ERR("expected pending\n"); \ - else { \ - DP(BNX2X_MSG_IOV, "no ramrod. 
Scheduling\n"); \ - atomic_set(&vf->op_in_progress, 1); \ - queue_delayed_work(bnx2x_wq, &bp->sp_task, 0); \ - return; \ - } \ - } while (0) - -#define bnx2x_vfop_opset(first_state, trans_hndlr, done_hndlr) \ - do { \ - vfop->state = first_state; \ - vfop->op_p = &vf->op_params; \ - vfop->transition = trans_hndlr; \ - vfop->done = done_hndlr; \ - } while (0) - -static inline struct bnx2x_vfop *bnx2x_vfop_cur(struct bnx2x *bp, - struct bnx2x_virtf *vf) -{ - WARN(!mutex_is_locked(&vf->op_mutex), "about to access vf op linked list but mutex was not locked!"); - WARN_ON(list_empty(&vf->op_list_head)); - return list_first_entry(&vf->op_list_head, struct bnx2x_vfop, link); -} - -static inline struct bnx2x_vfop *bnx2x_vfop_add(struct bnx2x *bp, - struct bnx2x_virtf *vf) -{ - struct bnx2x_vfop *vfop = kzalloc(sizeof(*vfop), GFP_KERNEL); - - WARN(!mutex_is_locked(&vf->op_mutex), "about to access vf op linked list but mutex was not locked!"); - if (vfop) { - INIT_LIST_HEAD(&vfop->link); - list_add(&vfop->link, &vf->op_list_head); - } - return vfop; -} - -static inline void bnx2x_vfop_end(struct bnx2x *bp, struct bnx2x_virtf *vf, - struct bnx2x_vfop *vfop) -{ - /* rc < 0 - error, otherwise set to 0 */ - DP(BNX2X_MSG_IOV, "rc was %d\n", vfop->rc); - if (vfop->rc >= 0) - vfop->rc = 0; - DP(BNX2X_MSG_IOV, "rc is now %d\n", vfop->rc); - - /* unlink the current op context and propagate error code - * must be done before invoking the 'done()' handler - */ - WARN(!mutex_is_locked(&vf->op_mutex), - "about to access vf op linked list but mutex was not locked!"); - list_del(&vfop->link); - - if (list_empty(&vf->op_list_head)) { - DP(BNX2X_MSG_IOV, "list was empty %d\n", vfop->rc); - vf->op_rc = vfop->rc; - DP(BNX2X_MSG_IOV, "copying rc vf->op_rc %d, vfop->rc %d\n", - vf->op_rc, vfop->rc); - } else { - struct bnx2x_vfop *cur_vfop; - - DP(BNX2X_MSG_IOV, "list not empty %d\n", vfop->rc); - cur_vfop = bnx2x_vfop_cur(bp, vf); - cur_vfop->rc = vfop->rc; - DP(BNX2X_MSG_IOV, "copying rc vf->op_rc %d, vfop->rc %d\n", - vf->op_rc, vfop->rc); - } - - /* invoke done handler */ - if (vfop->done) { - DP(BNX2X_MSG_IOV, "calling done handler\n"); - vfop->done(bp, vf); - } else { - /* there is no done handler for the operation to unlock - * the mutex. Must have gotten here from PF initiated VF RELEASE - */ - bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF); - } - - DP(BNX2X_MSG_IOV, "done handler complete. vf->op_rc %d, vfop->rc %d\n", - vf->op_rc, vfop->rc); - - /* if this is the last nested op reset the wait_blocking flag - * to release any blocking wrappers, only after 'done()' is invoked - */ - if (list_empty(&vf->op_list_head)) { - DP(BNX2X_MSG_IOV, "list was empty after done %d\n", vfop->rc); - vf->op_wait_blocking = false; - } - - kfree(vfop); -} - -static inline int bnx2x_vfop_wait_blocking(struct bnx2x *bp, - struct bnx2x_virtf *vf) -{ - /* can take a while if any port is running */ - int cnt = 5000; - - might_sleep(); - while (cnt--) { - if (vf->op_wait_blocking == false) { -#ifdef BNX2X_STOP_ON_ERROR - DP(BNX2X_MSG_IOV, "exit (cnt %d)\n", 5000 - cnt); -#endif - return 0; - } - usleep_range(1000, 2000); - - if (bp->panic) - return -EIO; - } - - /* timeout! 
*/ -#ifdef BNX2X_STOP_ON_ERROR - bnx2x_panic(); -#endif - - return -EBUSY; -} - -static inline int bnx2x_vfop_transition(struct bnx2x *bp, - struct bnx2x_virtf *vf, - vfop_handler_t transition, - bool block) -{ - if (block) - vf->op_wait_blocking = true; - transition(bp, vf); - if (block) - return bnx2x_vfop_wait_blocking(bp, vf); - return 0; -} - /* VFOP queue construction helpers */ void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf, struct bnx2x_queue_init_params *init_params, @@ -657,59 +416,41 @@ void bnx2x_vfop_qctor_dump_rx(struct bnx2x *bp, struct bnx2x_virtf *vf, void bnx2x_vfop_qctor_prep(struct bnx2x *bp, struct bnx2x_virtf *vf, struct bnx2x_vf_queue *q, - struct bnx2x_vfop_qctor_params *p, + struct bnx2x_vf_queue_construct_params *p, unsigned long q_type); -int bnx2x_vfop_mac_list_cmd(struct bnx2x *bp, - struct bnx2x_virtf *vf, - struct bnx2x_vfop_cmd *cmd, - struct bnx2x_vfop_filters *macs, - int qid, bool drv_only); - -int bnx2x_vfop_vlan_list_cmd(struct bnx2x *bp, - struct bnx2x_virtf *vf, - struct bnx2x_vfop_cmd *cmd, - struct bnx2x_vfop_filters *vlans, - int qid, bool drv_only); - -int bnx2x_vfop_qsetup_cmd(struct bnx2x *bp, - struct bnx2x_virtf *vf, - struct bnx2x_vfop_cmd *cmd, - int qid); - -int bnx2x_vfop_qdown_cmd(struct bnx2x *bp, - struct bnx2x_virtf *vf, - struct bnx2x_vfop_cmd *cmd, - int qid); - -int bnx2x_vfop_mcast_cmd(struct bnx2x *bp, - struct bnx2x_virtf *vf, - struct bnx2x_vfop_cmd *cmd, - bnx2x_mac_addr_t *mcasts, - int mcast_num, bool drv_only); - -int bnx2x_vfop_rxmode_cmd(struct bnx2x *bp, - struct bnx2x_virtf *vf, - struct bnx2x_vfop_cmd *cmd, - int qid, unsigned long accept_flags); - -int bnx2x_vfop_close_cmd(struct bnx2x *bp, - struct bnx2x_virtf *vf, - struct bnx2x_vfop_cmd *cmd); - -int bnx2x_vfop_release_cmd(struct bnx2x *bp, - struct bnx2x_virtf *vf, - struct bnx2x_vfop_cmd *cmd); -int bnx2x_vfop_rss_cmd(struct bnx2x *bp, - struct bnx2x_virtf *vf, - struct bnx2x_vfop_cmd *cmd); +int bnx2x_vf_mac_vlan_config_list(struct bnx2x *bp, struct bnx2x_virtf *vf, + struct bnx2x_vf_mac_vlan_filters *filters, + int qid, bool drv_only); + +int bnx2x_vf_queue_setup(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid, + struct bnx2x_vf_queue_construct_params *qctor); + +int bnx2x_vf_queue_teardown(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid); + +int bnx2x_vf_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf, + bnx2x_mac_addr_t *mcasts, int mc_num, bool drv_only); + +int bnx2x_vf_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf, + int qid, unsigned long accept_flags); + +int bnx2x_vf_close(struct bnx2x *bp, struct bnx2x_virtf *vf); + +int bnx2x_vf_free(struct bnx2x *bp, struct bnx2x_virtf *vf); + +int bnx2x_vf_rss_update(struct bnx2x *bp, struct bnx2x_virtf *vf, + struct bnx2x_config_rss_params *rss); + +int bnx2x_vf_tpa_update(struct bnx2x *bp, struct bnx2x_virtf *vf, + struct vfpf_tpa_tlv *tlv, + struct bnx2x_queue_update_tpa_params *params); /* VF release ~ VF close + VF release-resources * * Release is the ultimate SW shutdown and is called whenever an * irrecoverable error is encountered. 
*/ -void bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf, bool block); +int bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf); int bnx2x_vf_idx_by_abs_fid(struct bnx2x *bp, u16 abs_vfid); u8 bnx2x_vf_max_queue_cnt(struct bnx2x *bp, struct bnx2x_virtf *vf); @@ -772,18 +513,20 @@ void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp); int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs); void bnx2x_iov_channel_down(struct bnx2x *bp); +void bnx2x_iov_task(struct work_struct *work); + +void bnx2x_schedule_iov_task(struct bnx2x *bp, enum bnx2x_iov_flag flag); + #else /* CONFIG_BNX2X_SRIOV */ static inline void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid, struct bnx2x_queue_sp_obj **q_obj) {} -static inline void bnx2x_iov_sp_event(struct bnx2x *bp, int vf_cid, - bool queue_work) {} static inline void bnx2x_vf_handle_flr_event(struct bnx2x *bp) {} static inline int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem) {return 1; } -static inline void bnx2x_iov_sp_task(struct bnx2x *bp) {} -static inline void bnx2x_vf_mbx(struct bnx2x *bp, - struct vf_pf_event_data *vfpf_event) {} +static inline void bnx2x_vf_mbx(struct bnx2x *bp) {} +static inline void bnx2x_vf_mbx_schedule(struct bnx2x *bp, + struct vf_pf_event_data *vfpf_event) {} static inline int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line) {return line; } static inline void bnx2x_iov_init_dq(struct bnx2x *bp) {} static inline int bnx2x_iov_alloc_mem(struct bnx2x *bp) {return 0; } @@ -830,5 +573,8 @@ static inline void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp) {} static inline int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs) {return 0; } static inline void bnx2x_iov_channel_down(struct bnx2x *bp) {} +static inline void bnx2x_iov_task(struct work_struct *work) {} +static inline void bnx2x_schedule_iov_task(struct bnx2x *bp, enum bnx2x_iov_flag flag) {} + #endif /* CONFIG_BNX2X_SRIOV */ #endif /* bnx2x_sriov.h */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c index 3fa6c2a2a5a9..0622884596b2 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c @@ -548,6 +548,7 @@ static void bnx2x_leading_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf, vf->leading_rss = cl_id; q->is_leading = true; + q->sp_initialized = true; } /* ask the pf to open a queue for the vf */ @@ -672,6 +673,7 @@ static int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx) out: bnx2x_vfpf_finalize(bp, &req->first_tlv); + return rc; } @@ -894,29 +896,16 @@ int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp) DP(NETIF_MSG_IFUP, "Rx mode is %d\n", mode); - switch (mode) { - case BNX2X_RX_MODE_NONE: /* no Rx */ + /* Ignore everything except MODE_NONE */ + if (mode == BNX2X_RX_MODE_NONE) { req->rx_mask = VFPF_RX_MASK_ACCEPT_NONE; - break; - case BNX2X_RX_MODE_NORMAL: + } else { + /* Current PF driver will not look at the specific flags, + * but they are required when working with older drivers on hv.
+ */ req->rx_mask = VFPF_RX_MASK_ACCEPT_MATCHED_MULTICAST; req->rx_mask |= VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST; req->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST; - break; - case BNX2X_RX_MODE_ALLMULTI: - req->rx_mask = VFPF_RX_MASK_ACCEPT_ALL_MULTICAST; - req->rx_mask |= VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST; - req->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST; - break; - case BNX2X_RX_MODE_PROMISC: - req->rx_mask = VFPF_RX_MASK_ACCEPT_ALL_UNICAST; - req->rx_mask |= VFPF_RX_MASK_ACCEPT_ALL_MULTICAST; - req->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST; - break; - default: - BNX2X_ERR("BAD rx mode (%d)\n", mode); - rc = -EINVAL; - goto out; } req->flags |= VFPF_SET_Q_FILTERS_RX_MASK_CHANGED; @@ -937,7 +926,7 @@ int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp) BNX2X_ERR("Set Rx mode failed: %d\n", resp->hdr.status); rc = -EINVAL; } -out: + bnx2x_vfpf_finalize(bp, &req->first_tlv); return rc; @@ -1047,7 +1036,8 @@ static void bnx2x_vf_mbx_resp_single_tlv(struct bnx2x *bp, } static void bnx2x_vf_mbx_resp_send_msg(struct bnx2x *bp, - struct bnx2x_virtf *vf) + struct bnx2x_virtf *vf, + int vf_rc) { struct bnx2x_vf_mbx *mbx = BP_VF_MBX(bp, vf->index); struct pfvf_general_resp_tlv *resp = &mbx->msg->resp.general_resp; @@ -1059,7 +1049,7 @@ static void bnx2x_vf_mbx_resp_send_msg(struct bnx2x *bp, DP(BNX2X_MSG_IOV, "mailbox vf address hi 0x%x, lo 0x%x, offset 0x%x\n", mbx->vf_addr_hi, mbx->vf_addr_lo, mbx->first_tlv.resp_msg_offset); - resp->hdr.status = bnx2x_pfvf_status_codes(vf->op_rc); + resp->hdr.status = bnx2x_pfvf_status_codes(vf_rc); /* send response */ vf_addr = HILO_U64(mbx->vf_addr_hi, mbx->vf_addr_lo) + @@ -1088,9 +1078,6 @@ static void bnx2x_vf_mbx_resp_send_msg(struct bnx2x *bp, storm_memset_vf_mbx_ack(bp, vf->abs_vfid); mmiowb(); - /* initiate dmae to send the response */ - mbx->flags &= ~VF_MSG_INPROCESS; - /* copy the response header including status-done field, * must be last dmae, must be after FW is acked */ @@ -1110,14 +1097,15 @@ static void bnx2x_vf_mbx_resp_send_msg(struct bnx2x *bp, return; mbx_error: - bnx2x_vf_release(bp, vf, false); /* non blocking */ + bnx2x_vf_release(bp, vf); } static void bnx2x_vf_mbx_resp(struct bnx2x *bp, - struct bnx2x_virtf *vf) + struct bnx2x_virtf *vf, + int rc) { bnx2x_vf_mbx_resp_single_tlv(bp, vf); - bnx2x_vf_mbx_resp_send_msg(bp, vf); + bnx2x_vf_mbx_resp_send_msg(bp, vf, rc); } static void bnx2x_vf_mbx_resp_phys_port(struct bnx2x *bp, @@ -1159,7 +1147,8 @@ static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf, resp->pfdev_info.db_size = bp->db_size; resp->pfdev_info.indices_per_sb = HC_SB_MAX_INDICES_E2; resp->pfdev_info.pf_cap = (PFVF_CAP_RSS | - /* PFVF_CAP_DHC |*/ PFVF_CAP_TPA); + PFVF_CAP_TPA | + PFVF_CAP_TPA_UPDATE); bnx2x_fill_fw_str(bp, resp->pfdev_info.fw_ver, sizeof(resp->pfdev_info.fw_ver)); @@ -1240,8 +1229,7 @@ static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf, sizeof(struct channel_list_end_tlv)); /* send the response */ - vf->op_rc = vfop_status; - bnx2x_vf_mbx_resp_send_msg(bp, vf); + bnx2x_vf_mbx_resp_send_msg(bp, vf, vfop_status); } static void bnx2x_vf_mbx_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf, @@ -1273,19 +1261,20 @@ static void bnx2x_vf_mbx_init_vf(struct bnx2x *bp, struct bnx2x_virtf *vf, struct bnx2x_vf_mbx *mbx) { struct vfpf_init_tlv *init = &mbx->msg->req.init; + int rc; /* record ghost addresses from vf message */ vf->spq_map = init->spq_addr; vf->fw_stat_map = init->stats_addr; vf->stats_stride = init->stats_stride; - vf->op_rc = bnx2x_vf_init(bp, vf, (dma_addr_t 
*)init->sb_addr); + rc = bnx2x_vf_init(bp, vf, (dma_addr_t *)init->sb_addr); /* set VF multiqueue statistics collection mode */ if (init->flags & VFPF_INIT_FLG_STATS_COALESCE) vf->cfg_flags |= VF_CFG_STATS_COALESCE; /* response */ - bnx2x_vf_mbx_resp(bp, vf); + bnx2x_vf_mbx_resp(bp, vf, rc); } /* convert MBX queue-flags to standard SP queue-flags */ @@ -1320,16 +1309,14 @@ static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf, struct bnx2x_vf_mbx *mbx) { struct vfpf_setup_q_tlv *setup_q = &mbx->msg->req.setup_q; - struct bnx2x_vfop_cmd cmd = { - .done = bnx2x_vf_mbx_resp, - .block = false, - }; + struct bnx2x_vf_queue_construct_params qctor; + int rc = 0; /* verify vf_qid */ if (setup_q->vf_qid >= vf_rxq_count(vf)) { BNX2X_ERR("vf_qid %d invalid, max queue count is %d\n", setup_q->vf_qid, vf_rxq_count(vf)); - vf->op_rc = -EINVAL; + rc = -EINVAL; goto response; } @@ -1347,9 +1334,10 @@ static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf, bnx2x_leading_vfq_init(bp, vf, q); /* re-init the VF operation context */ - memset(&vf->op_params.qctor, 0 , sizeof(vf->op_params.qctor)); - setup_p = &vf->op_params.qctor.prep_qsetup; - init_p = &vf->op_params.qctor.qstate.params.init; + memset(&qctor, 0 , + sizeof(struct bnx2x_vf_queue_construct_params)); + setup_p = &qctor.prep_qsetup; + init_p = &qctor.qstate.params.init; /* activate immediately */ __set_bit(BNX2X_Q_FLG_ACTIVE, &setup_p->flags); @@ -1435,44 +1423,34 @@ static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf, q->index, q->sb_idx); } /* complete the preparations */ - bnx2x_vfop_qctor_prep(bp, vf, q, &vf->op_params.qctor, q_type); + bnx2x_vfop_qctor_prep(bp, vf, q, &qctor, q_type); - vf->op_rc = bnx2x_vfop_qsetup_cmd(bp, vf, &cmd, q->index); - if (vf->op_rc) + rc = bnx2x_vf_queue_setup(bp, vf, q->index, &qctor); + if (rc) goto response; - return; } response: - bnx2x_vf_mbx_resp(bp, vf); + bnx2x_vf_mbx_resp(bp, vf, rc); } -enum bnx2x_vfop_filters_state { - BNX2X_VFOP_MBX_Q_FILTERS_MACS, - BNX2X_VFOP_MBX_Q_FILTERS_VLANS, - BNX2X_VFOP_MBX_Q_FILTERS_RXMODE, - BNX2X_VFOP_MBX_Q_FILTERS_MCAST, - BNX2X_VFOP_MBX_Q_FILTERS_DONE -}; - static int bnx2x_vf_mbx_macvlan_list(struct bnx2x *bp, struct bnx2x_virtf *vf, struct vfpf_set_q_filters_tlv *tlv, - struct bnx2x_vfop_filters **pfl, + struct bnx2x_vf_mac_vlan_filters **pfl, u32 type_flag) { int i, j; - struct bnx2x_vfop_filters *fl = NULL; + struct bnx2x_vf_mac_vlan_filters *fl = NULL; size_t fsz; - fsz = tlv->n_mac_vlan_filters * sizeof(struct bnx2x_vfop_filter) + - sizeof(struct bnx2x_vfop_filters); + fsz = tlv->n_mac_vlan_filters * + sizeof(struct bnx2x_vf_mac_vlan_filter) + + sizeof(struct bnx2x_vf_mac_vlan_filters); fl = kzalloc(fsz, GFP_KERNEL); if (!fl) return -ENOMEM; - INIT_LIST_HEAD(&fl->head); - for (i = 0, j = 0; i < tlv->n_mac_vlan_filters; i++) { struct vfpf_q_mac_vlan_filter *msg_filter = &tlv->filters[i]; @@ -1480,17 +1458,17 @@ static int bnx2x_vf_mbx_macvlan_list(struct bnx2x *bp, continue; if (type_flag == VFPF_Q_FILTER_DEST_MAC_VALID) { fl->filters[j].mac = msg_filter->mac; - fl->filters[j].type = BNX2X_VFOP_FILTER_MAC; + fl->filters[j].type = BNX2X_VF_FILTER_MAC; } else { fl->filters[j].vid = msg_filter->vlan_tag; - fl->filters[j].type = BNX2X_VFOP_FILTER_VLAN; + fl->filters[j].type = BNX2X_VF_FILTER_VLAN; } fl->filters[j].add = (msg_filter->flags & VFPF_Q_FILTER_SET_MAC) ? 
true : false; - list_add_tail(&fl->filters[j++].link, &fl->head); + fl->count++; } - if (list_empty(&fl->head)) + if (!fl->count) kfree(fl); else *pfl = fl; @@ -1530,180 +1508,96 @@ static void bnx2x_vf_mbx_dp_q_filters(struct bnx2x *bp, int msglvl, #define VFPF_MAC_FILTER VFPF_Q_FILTER_DEST_MAC_VALID #define VFPF_VLAN_FILTER VFPF_Q_FILTER_VLAN_TAG_VALID -static void bnx2x_vfop_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf) +static int bnx2x_vf_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf) { - int rc; + int rc = 0; struct vfpf_set_q_filters_tlv *msg = &BP_VF_MBX(bp, vf->index)->msg->req.set_q_filters; - struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf); - enum bnx2x_vfop_filters_state state = vfop->state; - - struct bnx2x_vfop_cmd cmd = { - .done = bnx2x_vfop_mbx_qfilters, - .block = false, - }; - - DP(BNX2X_MSG_IOV, "STATE: %d\n", state); - - if (vfop->rc < 0) - goto op_err; + /* check for any mac/vlan changes */ + if (msg->flags & VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED) { + /* build mac list */ + struct bnx2x_vf_mac_vlan_filters *fl = NULL; - switch (state) { - case BNX2X_VFOP_MBX_Q_FILTERS_MACS: - /* next state */ - vfop->state = BNX2X_VFOP_MBX_Q_FILTERS_VLANS; + rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl, + VFPF_MAC_FILTER); + if (rc) + goto op_err; - /* check for any vlan/mac changes */ - if (msg->flags & VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED) { - /* build mac list */ - struct bnx2x_vfop_filters *fl = NULL; + if (fl) { - vfop->rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl, - VFPF_MAC_FILTER); - if (vfop->rc) + /* set mac list */ + rc = bnx2x_vf_mac_vlan_config_list(bp, vf, fl, + msg->vf_qid, + false); + if (rc) goto op_err; - - if (fl) { - /* set mac list */ - rc = bnx2x_vfop_mac_list_cmd(bp, vf, &cmd, fl, - msg->vf_qid, - false); - if (rc) { - vfop->rc = rc; - goto op_err; - } - return; - } } - /* fall through */ - case BNX2X_VFOP_MBX_Q_FILTERS_VLANS: - /* next state */ - vfop->state = BNX2X_VFOP_MBX_Q_FILTERS_RXMODE; + /* build vlan list */ + fl = NULL; - /* check for any vlan/mac changes */ - if (msg->flags & VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED) { - /* build vlan list */ - struct bnx2x_vfop_filters *fl = NULL; - - vfop->rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl, - VFPF_VLAN_FILTER); - if (vfop->rc) + rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl, + VFPF_VLAN_FILTER); + if (rc) + goto op_err; + + if (fl) { + /* set vlan list */ + rc = bnx2x_vf_mac_vlan_config_list(bp, vf, fl, + msg->vf_qid, + false); + if (rc) goto op_err; - - if (fl) { - /* set vlan list */ - rc = bnx2x_vfop_vlan_list_cmd(bp, vf, &cmd, fl, - msg->vf_qid, - false); - if (rc) { - vfop->rc = rc; - goto op_err; - } - return; - } } - /* fall through */ - - case BNX2X_VFOP_MBX_Q_FILTERS_RXMODE: - /* next state */ - vfop->state = BNX2X_VFOP_MBX_Q_FILTERS_MCAST; - - if (msg->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED) { - unsigned long accept = 0; - struct pf_vf_bulletin_content *bulletin = - BP_VF_BULLETIN(bp, vf->index); - - /* covert VF-PF if mask to bnx2x accept flags */ - if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST) - __set_bit(BNX2X_ACCEPT_UNICAST, &accept); - - if (msg->rx_mask & - VFPF_RX_MASK_ACCEPT_MATCHED_MULTICAST) - __set_bit(BNX2X_ACCEPT_MULTICAST, &accept); - - if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_ALL_UNICAST) - __set_bit(BNX2X_ACCEPT_ALL_UNICAST, &accept); - - if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_ALL_MULTICAST) - __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &accept); + } - if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_BROADCAST) - __set_bit(BNX2X_ACCEPT_BROADCAST, &accept); + if 
(msg->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED) { + unsigned long accept = 0; + struct pf_vf_bulletin_content *bulletin = + BP_VF_BULLETIN(bp, vf->index); - /* A packet arriving the vf's mac should be accepted - * with any vlan, unless a vlan has already been - * configured. - */ - if (!(bulletin->valid_bitmap & (1 << VLAN_VALID))) - __set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept); - - /* set rx-mode */ - rc = bnx2x_vfop_rxmode_cmd(bp, vf, &cmd, - msg->vf_qid, accept); - if (rc) { - vfop->rc = rc; - goto op_err; - } - return; + /* Ignore VF requested mode; instead set a regular mode */ + if (msg->rx_mask != VFPF_RX_MASK_ACCEPT_NONE) { + __set_bit(BNX2X_ACCEPT_UNICAST, &accept); + __set_bit(BNX2X_ACCEPT_MULTICAST, &accept); + __set_bit(BNX2X_ACCEPT_BROADCAST, &accept); } - /* fall through */ - - case BNX2X_VFOP_MBX_Q_FILTERS_MCAST: - /* next state */ - vfop->state = BNX2X_VFOP_MBX_Q_FILTERS_DONE; - - if (msg->flags & VFPF_SET_Q_FILTERS_MULTICAST_CHANGED) { - /* set mcasts */ - rc = bnx2x_vfop_mcast_cmd(bp, vf, &cmd, msg->multicast, - msg->n_multicast, false); - if (rc) { - vfop->rc = rc; - goto op_err; - } - return; - } - /* fall through */ -op_done: - case BNX2X_VFOP_MBX_Q_FILTERS_DONE: - bnx2x_vfop_end(bp, vf, vfop); - return; -op_err: - BNX2X_ERR("QFILTERS[%d:%d] error: rc %d\n", - vf->abs_vfid, msg->vf_qid, vfop->rc); - goto op_done; - default: - bnx2x_vfop_default(state); + /* A packet arriving the vf's mac should be accepted + * with any vlan, unless a vlan has already been + * configured. + */ + if (!(bulletin->valid_bitmap & (1 << VLAN_VALID))) + __set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept); + + /* set rx-mode */ + rc = bnx2x_vf_rxmode(bp, vf, msg->vf_qid, accept); + if (rc) + goto op_err; } -} -static int bnx2x_vfop_mbx_qfilters_cmd(struct bnx2x *bp, - struct bnx2x_virtf *vf, - struct bnx2x_vfop_cmd *cmd) -{ - struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); - if (vfop) { - bnx2x_vfop_opset(BNX2X_VFOP_MBX_Q_FILTERS_MACS, - bnx2x_vfop_mbx_qfilters, cmd->done); - return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_mbx_qfilters, - cmd->block); + if (msg->flags & VFPF_SET_Q_FILTERS_MULTICAST_CHANGED) { + /* set mcasts */ + rc = bnx2x_vf_mcast(bp, vf, msg->multicast, + msg->n_multicast, false); + if (rc) + goto op_err; } - return -ENOMEM; +op_err: + if (rc) + BNX2X_ERR("QFILTERS[%d:%d] error: rc %d\n", + vf->abs_vfid, msg->vf_qid, rc); + return rc; } -static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp, - struct bnx2x_virtf *vf, - struct bnx2x_vf_mbx *mbx) +static int bnx2x_filters_validate_mac(struct bnx2x *bp, + struct bnx2x_virtf *vf, + struct vfpf_set_q_filters_tlv *filters) { - struct vfpf_set_q_filters_tlv *filters = &mbx->msg->req.set_q_filters; struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vf->index); - struct bnx2x_vfop_cmd cmd = { - .done = bnx2x_vf_mbx_resp, - .block = false, - }; + int rc = 0; /* if a mac was already set for this VF via the set vf mac ndo, we only * accept mac configurations of that mac. Why accept them at all? 
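The hunks above drop the asynchronous VFOP state machine in favor of straight-line synchronous calls, and replace the list_head-chained struct bnx2x_vfop_filters with the flat struct bnx2x_vf_mac_vlan_filters table and its explicit count. A minimal sketch of how such a table is built and handed off, assuming the structures from bnx2x_sriov.h and <linux/slab.h>; demo_build_mac_filters() is a hypothetical helper shown only for illustration (the patch's real producer is bnx2x_vf_mbx_macvlan_list()):

static struct bnx2x_vf_mac_vlan_filters *
demo_build_mac_filters(u8 (*macs)[ETH_ALEN], int n)
{
	struct bnx2x_vf_mac_vlan_filters *fl;
	int i;

	/* one allocation covers the header plus the flexible filter
	 * array, replacing the old per-filter list_head chaining
	 */
	fl = kzalloc(sizeof(*fl) + n * sizeof(fl->filters[0]), GFP_KERNEL);
	if (!fl)
		return NULL;

	for (i = 0; i < n; i++) {
		fl->filters[i].type = BNX2X_VF_FILTER_MAC;
		fl->filters[i].add = true;
		fl->filters[i].mac = macs[i];
	}
	fl->count = n;

	/* the consumer walks filters[0..count-1], e.g.:
	 * rc = bnx2x_vf_mac_vlan_config_list(bp, vf, fl, qid, false);
	 */
	return fl;
}
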
@@ -1715,7 +1609,7 @@ static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp, if (filters->n_mac_vlan_filters > 1) { BNX2X_ERR("VF[%d] requested the addition of multiple macs after set_vf_mac ndo was called\n", vf->abs_vfid); - vf->op_rc = -EPERM; + rc = -EPERM; goto response; } @@ -1725,10 +1619,22 @@ static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp, BNX2X_ERR("VF[%d] requested the addition of a mac address not matching the one configured by set_vf_mac ndo\n", vf->abs_vfid); - vf->op_rc = -EPERM; + rc = -EPERM; goto response; } } + +response: + return rc; +} + +static int bnx2x_filters_validate_vlan(struct bnx2x *bp, + struct bnx2x_virtf *vf, + struct vfpf_set_q_filters_tlv *filters) +{ + struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vf->index); + int rc = 0; + /* if vlan was set by hypervisor we don't allow guest to config vlan */ if (bulletin->valid_bitmap & 1 << VLAN_VALID) { int i; @@ -1739,14 +1645,35 @@ static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp, VFPF_Q_FILTER_VLAN_TAG_VALID) { BNX2X_ERR("VF[%d] attempted to configure vlan but one was already set by Hypervisor. Aborting request\n", vf->abs_vfid); - vf->op_rc = -EPERM; + rc = -EPERM; goto response; } } } /* verify vf_qid */ - if (filters->vf_qid > vf_rxq_count(vf)) + if (filters->vf_qid > vf_rxq_count(vf)) { + rc = -EPERM; + goto response; + } + +response: + return rc; +} + +static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp, + struct bnx2x_virtf *vf, + struct bnx2x_vf_mbx *mbx) +{ + struct vfpf_set_q_filters_tlv *filters = &mbx->msg->req.set_q_filters; + int rc; + + rc = bnx2x_filters_validate_mac(bp, vf, filters); + if (rc) + goto response; + + rc = bnx2x_filters_validate_vlan(bp, vf, filters); + if (rc) goto response; DP(BNX2X_MSG_IOV, "VF[%d] Q_FILTERS: queue[%d]\n", @@ -1756,125 +1683,169 @@ static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp, /* print q_filter message */ bnx2x_vf_mbx_dp_q_filters(bp, BNX2X_MSG_IOV, filters); - vf->op_rc = bnx2x_vfop_mbx_qfilters_cmd(bp, vf, &cmd); - if (vf->op_rc) - goto response; - return; - + rc = bnx2x_vf_mbx_qfilters(bp, vf); response: - bnx2x_vf_mbx_resp(bp, vf); + bnx2x_vf_mbx_resp(bp, vf, rc); } static void bnx2x_vf_mbx_teardown_q(struct bnx2x *bp, struct bnx2x_virtf *vf, struct bnx2x_vf_mbx *mbx) { int qid = mbx->msg->req.q_op.vf_qid; - struct bnx2x_vfop_cmd cmd = { - .done = bnx2x_vf_mbx_resp, - .block = false, - }; + int rc; DP(BNX2X_MSG_IOV, "VF[%d] Q_TEARDOWN: vf_qid=%d\n", vf->abs_vfid, qid); - vf->op_rc = bnx2x_vfop_qdown_cmd(bp, vf, &cmd, qid); - if (vf->op_rc) - bnx2x_vf_mbx_resp(bp, vf); + rc = bnx2x_vf_queue_teardown(bp, vf, qid); + bnx2x_vf_mbx_resp(bp, vf, rc); } static void bnx2x_vf_mbx_close_vf(struct bnx2x *bp, struct bnx2x_virtf *vf, struct bnx2x_vf_mbx *mbx) { - struct bnx2x_vfop_cmd cmd = { - .done = bnx2x_vf_mbx_resp, - .block = false, - }; + int rc; DP(BNX2X_MSG_IOV, "VF[%d] VF_CLOSE\n", vf->abs_vfid); - vf->op_rc = bnx2x_vfop_close_cmd(bp, vf, &cmd); - if (vf->op_rc) - bnx2x_vf_mbx_resp(bp, vf); + rc = bnx2x_vf_close(bp, vf); + bnx2x_vf_mbx_resp(bp, vf, rc); } static void bnx2x_vf_mbx_release_vf(struct bnx2x *bp, struct bnx2x_virtf *vf, struct bnx2x_vf_mbx *mbx) { - struct bnx2x_vfop_cmd cmd = { - .done = bnx2x_vf_mbx_resp, - .block = false, - }; + int rc; DP(BNX2X_MSG_IOV, "VF[%d] VF_RELEASE\n", vf->abs_vfid); - vf->op_rc = bnx2x_vfop_release_cmd(bp, vf, &cmd); - if (vf->op_rc) - bnx2x_vf_mbx_resp(bp, vf); + rc = bnx2x_vf_free(bp, vf); + bnx2x_vf_mbx_resp(bp, vf, rc); } static void bnx2x_vf_mbx_update_rss(struct bnx2x 
*bp, struct bnx2x_virtf *vf, struct bnx2x_vf_mbx *mbx) { - struct bnx2x_vfop_cmd cmd = { - .done = bnx2x_vf_mbx_resp, - .block = false, - }; - struct bnx2x_config_rss_params *vf_op_params = &vf->op_params.rss; + struct bnx2x_config_rss_params rss; struct vfpf_rss_tlv *rss_tlv = &mbx->msg->req.update_rss; + int rc = 0; if (rss_tlv->ind_table_size != T_ETH_INDIRECTION_TABLE_SIZE || rss_tlv->rss_key_size != T_ETH_RSS_KEY) { BNX2X_ERR("failing rss configuration of vf %d due to size mismatch\n", vf->index); - vf->op_rc = -EINVAL; + rc = -EINVAL; goto mbx_resp; } + memset(&rss, 0, sizeof(struct bnx2x_config_rss_params)); + /* set vfop params according to rss tlv */ - memcpy(vf_op_params->ind_table, rss_tlv->ind_table, + memcpy(rss.ind_table, rss_tlv->ind_table, T_ETH_INDIRECTION_TABLE_SIZE); - memcpy(vf_op_params->rss_key, rss_tlv->rss_key, - sizeof(rss_tlv->rss_key)); - vf_op_params->rss_obj = &vf->rss_conf_obj; - vf_op_params->rss_result_mask = rss_tlv->rss_result_mask; + memcpy(rss.rss_key, rss_tlv->rss_key, sizeof(rss_tlv->rss_key)); + rss.rss_obj = &vf->rss_conf_obj; + rss.rss_result_mask = rss_tlv->rss_result_mask; /* flags handled individually for backward/forward compatability */ - vf_op_params->rss_flags = 0; - vf_op_params->ramrod_flags = 0; + rss.rss_flags = 0; + rss.ramrod_flags = 0; if (rss_tlv->rss_flags & VFPF_RSS_MODE_DISABLED) - __set_bit(BNX2X_RSS_MODE_DISABLED, &vf_op_params->rss_flags); + __set_bit(BNX2X_RSS_MODE_DISABLED, &rss.rss_flags); if (rss_tlv->rss_flags & VFPF_RSS_MODE_REGULAR) - __set_bit(BNX2X_RSS_MODE_REGULAR, &vf_op_params->rss_flags); + __set_bit(BNX2X_RSS_MODE_REGULAR, &rss.rss_flags); if (rss_tlv->rss_flags & VFPF_RSS_SET_SRCH) - __set_bit(BNX2X_RSS_SET_SRCH, &vf_op_params->rss_flags); + __set_bit(BNX2X_RSS_SET_SRCH, &rss.rss_flags); if (rss_tlv->rss_flags & VFPF_RSS_IPV4) - __set_bit(BNX2X_RSS_IPV4, &vf_op_params->rss_flags); + __set_bit(BNX2X_RSS_IPV4, &rss.rss_flags); if (rss_tlv->rss_flags & VFPF_RSS_IPV4_TCP) - __set_bit(BNX2X_RSS_IPV4_TCP, &vf_op_params->rss_flags); + __set_bit(BNX2X_RSS_IPV4_TCP, &rss.rss_flags); if (rss_tlv->rss_flags & VFPF_RSS_IPV4_UDP) - __set_bit(BNX2X_RSS_IPV4_UDP, &vf_op_params->rss_flags); + __set_bit(BNX2X_RSS_IPV4_UDP, &rss.rss_flags); if (rss_tlv->rss_flags & VFPF_RSS_IPV6) - __set_bit(BNX2X_RSS_IPV6, &vf_op_params->rss_flags); + __set_bit(BNX2X_RSS_IPV6, &rss.rss_flags); if (rss_tlv->rss_flags & VFPF_RSS_IPV6_TCP) - __set_bit(BNX2X_RSS_IPV6_TCP, &vf_op_params->rss_flags); + __set_bit(BNX2X_RSS_IPV6_TCP, &rss.rss_flags); if (rss_tlv->rss_flags & VFPF_RSS_IPV6_UDP) - __set_bit(BNX2X_RSS_IPV6_UDP, &vf_op_params->rss_flags); + __set_bit(BNX2X_RSS_IPV6_UDP, &rss.rss_flags); if ((!(rss_tlv->rss_flags & VFPF_RSS_IPV4_TCP) && rss_tlv->rss_flags & VFPF_RSS_IPV4_UDP) || (!(rss_tlv->rss_flags & VFPF_RSS_IPV6_TCP) && rss_tlv->rss_flags & VFPF_RSS_IPV6_UDP)) { BNX2X_ERR("about to hit a FW assert. 
aborting...\n"); - vf->op_rc = -EINVAL; + rc = -EINVAL; goto mbx_resp; } - vf->op_rc = bnx2x_vfop_rss_cmd(bp, vf, &cmd); + rc = bnx2x_vf_rss_update(bp, vf, &rss); +mbx_resp: + bnx2x_vf_mbx_resp(bp, vf, rc); +} + +static int bnx2x_validate_tpa_params(struct bnx2x *bp, + struct vfpf_tpa_tlv *tpa_tlv) +{ + int rc = 0; + + if (tpa_tlv->tpa_client_info.max_sges_for_packet > + U_ETH_MAX_SGES_FOR_PACKET) { + rc = -EINVAL; + BNX2X_ERR("TPA update: max_sges received %d, max is %d\n", + tpa_tlv->tpa_client_info.max_sges_for_packet, + U_ETH_MAX_SGES_FOR_PACKET); + } + + if (tpa_tlv->tpa_client_info.max_tpa_queues > MAX_AGG_QS(bp)) { + rc = -EINVAL; + BNX2X_ERR("TPA update: max_tpa_queues received %d, max is %d\n", + tpa_tlv->tpa_client_info.max_tpa_queues, + MAX_AGG_QS(bp)); + } + + return rc; +} + +static void bnx2x_vf_mbx_update_tpa(struct bnx2x *bp, struct bnx2x_virtf *vf, + struct bnx2x_vf_mbx *mbx) +{ + struct bnx2x_queue_update_tpa_params vf_op_params; + struct vfpf_tpa_tlv *tpa_tlv = &mbx->msg->req.update_tpa; + int rc = 0; + + memset(&vf_op_params, 0, sizeof(vf_op_params)); + + if (bnx2x_validate_tpa_params(bp, tpa_tlv)) + goto mbx_resp; + + vf_op_params.complete_on_both_clients = + tpa_tlv->tpa_client_info.complete_on_both_clients; + vf_op_params.dont_verify_thr = + tpa_tlv->tpa_client_info.dont_verify_thr; + vf_op_params.max_agg_sz = + tpa_tlv->tpa_client_info.max_agg_size; + vf_op_params.max_sges_pkt = + tpa_tlv->tpa_client_info.max_sges_for_packet; + vf_op_params.max_tpa_queues = + tpa_tlv->tpa_client_info.max_tpa_queues; + vf_op_params.sge_buff_sz = + tpa_tlv->tpa_client_info.sge_buff_size; + vf_op_params.sge_pause_thr_high = + tpa_tlv->tpa_client_info.sge_pause_thr_high; + vf_op_params.sge_pause_thr_low = + tpa_tlv->tpa_client_info.sge_pause_thr_low; + vf_op_params.tpa_mode = + tpa_tlv->tpa_client_info.tpa_mode; + vf_op_params.update_ipv4 = + tpa_tlv->tpa_client_info.update_ipv4; + vf_op_params.update_ipv6 = + tpa_tlv->tpa_client_info.update_ipv6; + + rc = bnx2x_vf_tpa_update(bp, vf, tpa_tlv, &vf_op_params); mbx_resp: - if (vf->op_rc) - bnx2x_vf_mbx_resp(bp, vf); + bnx2x_vf_mbx_resp(bp, vf, rc); } /* dispatch request */ @@ -1916,6 +1887,9 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf, case CHANNEL_TLV_UPDATE_RSS: bnx2x_vf_mbx_update_rss(bp, vf, mbx); return; + case CHANNEL_TLV_UPDATE_TPA: + bnx2x_vf_mbx_update_tpa(bp, vf, mbx); + return; } } else { @@ -1935,11 +1909,8 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf, /* can we respond to VF (do we have an address for it?) 
*/ if (vf->state == VF_ACQUIRED || vf->state == VF_ENABLED) { - /* mbx_resp uses the op_rc of the VF */ - vf->op_rc = PFVF_STATUS_NOT_SUPPORTED; - /* notify the VF that we do not support this request */ - bnx2x_vf_mbx_resp(bp, vf); + bnx2x_vf_mbx_resp(bp, vf, PFVF_STATUS_NOT_SUPPORTED); } else { /* can't send a response since this VF is unknown to us * just ack the FW to release the mailbox and unlock @@ -1952,13 +1923,10 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf, } } -/* handle new vf-pf message */ -void bnx2x_vf_mbx(struct bnx2x *bp, struct vf_pf_event_data *vfpf_event) +void bnx2x_vf_mbx_schedule(struct bnx2x *bp, + struct vf_pf_event_data *vfpf_event) { - struct bnx2x_virtf *vf; - struct bnx2x_vf_mbx *mbx; u8 vf_idx; - int rc; DP(BNX2X_MSG_IOV, "vf pf event received: vfid %d, address_hi %x, address lo %x", @@ -1970,50 +1938,73 @@ void bnx2x_vf_mbx(struct bnx2x *bp, struct vf_pf_event_data *vfpf_event) BNX2X_NR_VIRTFN(bp)) { BNX2X_ERR("Illegal vf_id %d max allowed: %d\n", vfpf_event->vf_id, BNX2X_NR_VIRTFN(bp)); - goto mbx_done; + return; } + vf_idx = bnx2x_vf_idx_by_abs_fid(bp, vfpf_event->vf_id); - mbx = BP_VF_MBX(bp, vf_idx); - /* verify an event is not currently being processed - - * debug failsafe only - */ - if (mbx->flags & VF_MSG_INPROCESS) { - BNX2X_ERR("Previous message is still being processed, vf_id %d\n", - vfpf_event->vf_id); - goto mbx_done; - } - vf = BP_VF(bp, vf_idx); + /* Update VFDB with current message and schedule its handling */ + mutex_lock(&BP_VFDB(bp)->event_mutex); + BP_VF_MBX(bp, vf_idx)->vf_addr_hi = vfpf_event->msg_addr_hi; + BP_VF_MBX(bp, vf_idx)->vf_addr_lo = vfpf_event->msg_addr_lo; + BP_VFDB(bp)->event_occur |= (1ULL << vf_idx); + mutex_unlock(&BP_VFDB(bp)->event_mutex); - /* save the VF message address */ - mbx->vf_addr_hi = vfpf_event->msg_addr_hi; - mbx->vf_addr_lo = vfpf_event->msg_addr_lo; - DP(BNX2X_MSG_IOV, "mailbox vf address hi 0x%x, lo 0x%x, offset 0x%x\n", - mbx->vf_addr_hi, mbx->vf_addr_lo, mbx->first_tlv.resp_msg_offset); + bnx2x_schedule_iov_task(bp, BNX2X_IOV_HANDLE_VF_MSG); +} - /* dmae to get the VF request */ - rc = bnx2x_copy32_vf_dmae(bp, true, mbx->msg_mapping, vf->abs_vfid, - mbx->vf_addr_hi, mbx->vf_addr_lo, - sizeof(union vfpf_tlvs)/4); - if (rc) { - BNX2X_ERR("Failed to copy request VF %d\n", vf->abs_vfid); - goto mbx_error; - } +/* handle new vf-pf messages */ +void bnx2x_vf_mbx(struct bnx2x *bp) +{ + struct bnx2x_vfdb *vfdb = BP_VFDB(bp); + u64 events; + u8 vf_idx; + int rc; - /* process the VF message header */ - mbx->first_tlv = mbx->msg->req.first_tlv; + if (!vfdb) + return; - /* Clean response buffer to refrain from falsely seeing chains */ - memset(&mbx->msg->resp, 0, sizeof(union pfvf_tlvs)); + mutex_lock(&vfdb->event_mutex); + events = vfdb->event_occur; + vfdb->event_occur = 0; + mutex_unlock(&vfdb->event_mutex); - /* dispatch the request (will prepare the response) */ - bnx2x_vf_mbx_request(bp, vf, mbx); - goto mbx_done; + for_each_vf(bp, vf_idx) { + struct bnx2x_vf_mbx *mbx = BP_VF_MBX(bp, vf_idx); + struct bnx2x_virtf *vf = BP_VF(bp, vf_idx); -mbx_error: - bnx2x_vf_release(bp, vf, false); /* non blocking */ -mbx_done: - return; + /* Handle VFs which have pending events */ + if (!(events & (1ULL << vf_idx))) + continue; + + DP(BNX2X_MSG_IOV, + "Handling vf pf event vfid %d, address: [%x:%x], resp_offset 0x%x\n", + vf_idx, mbx->vf_addr_hi, mbx->vf_addr_lo, + mbx->first_tlv.resp_msg_offset); + + /* dmae to get the VF request */ + rc = bnx2x_copy32_vf_dmae(bp, true, mbx->msg_mapping, + 
vf->abs_vfid, mbx->vf_addr_hi, + mbx->vf_addr_lo, + sizeof(union vfpf_tlvs)/4); + if (rc) { + BNX2X_ERR("Failed to copy request VF %d\n", + vf->abs_vfid); + bnx2x_vf_release(bp, vf); + return; + } + + /* process the VF message header */ + mbx->first_tlv = mbx->msg->req.first_tlv; + + /* Clean response buffer to refrain from falsely + * seeing chains. + */ + memset(&mbx->msg->resp, 0, sizeof(union pfvf_tlvs)); + + /* dispatch the request (will prepare the response) */ + bnx2x_vf_mbx_request(bp, vf, mbx); + } } /* propagate local bulletin board to vf */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h index 208568bc7a71..c922b81170e5 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h @@ -162,6 +162,7 @@ struct pfvf_acquire_resp_tlv { #define PFVF_CAP_RSS 0x00000001 #define PFVF_CAP_DHC 0x00000002 #define PFVF_CAP_TPA 0x00000004 +#define PFVF_CAP_TPA_UPDATE 0x00000008 char fw_ver[32]; u16 db_size; u8 indices_per_sb; @@ -303,6 +304,25 @@ struct vfpf_set_q_filters_tlv { u32 rx_mask; /* see mask constants at the top of the file */ }; +struct vfpf_tpa_tlv { + struct vfpf_first_tlv first_tlv; + + struct vf_pf_tpa_client_info { + aligned_u64 sge_addr[PFVF_MAX_QUEUES_PER_VF]; + u8 update_ipv4; + u8 update_ipv6; + u8 max_tpa_queues; + u8 max_sges_for_packet; + u8 complete_on_both_clients; + u8 dont_verify_thr; + u8 tpa_mode; + u16 sge_buff_size; + u16 max_agg_size; + u16 sge_pause_thr_low; + u16 sge_pause_thr_high; + } tpa_client_info; +}; + /* close VF (disable VF) */ struct vfpf_close_tlv { struct vfpf_first_tlv first_tlv; @@ -331,6 +351,7 @@ union vfpf_tlvs { struct vfpf_set_q_filters_tlv set_q_filters; struct vfpf_release_tlv release; struct vfpf_rss_tlv update_rss; + struct vfpf_tpa_tlv update_tpa; struct channel_list_end_tlv list_end; struct tlv_buffer_size tlv_buf_size; }; @@ -405,6 +426,7 @@ enum channel_tlvs { CHANNEL_TLV_PF_SET_VLAN, CHANNEL_TLV_UPDATE_RSS, CHANNEL_TLV_PHYS_PORT_ID, + CHANNEL_TLV_UPDATE_TPA, CHANNEL_TLV_MAX }; diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c index fcf9105a5476..09f3fefcbf9c 100644 --- a/drivers/net/ethernet/broadcom/cnic.c +++ b/drivers/net/ethernet/broadcom/cnic.c @@ -1,6 +1,6 @@ /* cnic.c: Broadcom CNIC core network driver. 
* - * Copyright (c) 2006-2013 Broadcom Corporation + * Copyright (c) 2006-2014 Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -342,7 +342,7 @@ static int cnic_send_nlmsg(struct cnic_local *cp, u32 type, while (retry < 3) { rc = 0; rcu_read_lock(); - ulp_ops = rcu_dereference(cnic_ulp_tbl[CNIC_ULP_ISCSI]); + ulp_ops = rcu_dereference(cp->ulp_ops[CNIC_ULP_ISCSI]); if (ulp_ops) rc = ulp_ops->iscsi_nl_send_msg( cp->ulp_handle[CNIC_ULP_ISCSI], @@ -726,7 +726,7 @@ static void cnic_free_dma(struct cnic_dev *dev, struct cnic_dma *dma) for (i = 0; i < dma->num_pages; i++) { if (dma->pg_arr[i]) { - dma_free_coherent(&dev->pcidev->dev, BNX2_PAGE_SIZE, + dma_free_coherent(&dev->pcidev->dev, CNIC_PAGE_SIZE, dma->pg_arr[i], dma->pg_map_arr[i]); dma->pg_arr[i] = NULL; } @@ -785,7 +785,7 @@ static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma, for (i = 0; i < pages; i++) { dma->pg_arr[i] = dma_alloc_coherent(&dev->pcidev->dev, - BNX2_PAGE_SIZE, + CNIC_PAGE_SIZE, &dma->pg_map_arr[i], GFP_ATOMIC); if (dma->pg_arr[i] == NULL) @@ -794,8 +794,8 @@ static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma, if (!use_pg_tbl) return 0; - dma->pgtbl_size = ((pages * 8) + BNX2_PAGE_SIZE - 1) & - ~(BNX2_PAGE_SIZE - 1); + dma->pgtbl_size = ((pages * 8) + CNIC_PAGE_SIZE - 1) & + ~(CNIC_PAGE_SIZE - 1); dma->pgtbl = dma_alloc_coherent(&dev->pcidev->dev, dma->pgtbl_size, &dma->pgtbl_map, GFP_ATOMIC); if (dma->pgtbl == NULL) @@ -900,8 +900,8 @@ static int cnic_alloc_context(struct cnic_dev *dev) if (BNX2_CHIP(cp) == BNX2_CHIP_5709) { int i, k, arr_size; - cp->ctx_blk_size = BNX2_PAGE_SIZE; - cp->cids_per_blk = BNX2_PAGE_SIZE / 128; + cp->ctx_blk_size = CNIC_PAGE_SIZE; + cp->cids_per_blk = CNIC_PAGE_SIZE / 128; arr_size = BNX2_MAX_CID / cp->cids_per_blk * sizeof(struct cnic_ctx); cp->ctx_arr = kzalloc(arr_size, GFP_KERNEL); @@ -933,7 +933,7 @@ static int cnic_alloc_context(struct cnic_dev *dev) for (i = 0; i < cp->ctx_blks; i++) { cp->ctx_arr[i].ctx = dma_alloc_coherent(&dev->pcidev->dev, - BNX2_PAGE_SIZE, + CNIC_PAGE_SIZE, &cp->ctx_arr[i].mapping, GFP_KERNEL); if (cp->ctx_arr[i].ctx == NULL) @@ -1013,7 +1013,7 @@ static int __cnic_alloc_uio_rings(struct cnic_uio_dev *udev, int pages) if (udev->l2_ring) return 0; - udev->l2_ring_size = pages * BNX2_PAGE_SIZE; + udev->l2_ring_size = pages * CNIC_PAGE_SIZE; udev->l2_ring = dma_alloc_coherent(&udev->pdev->dev, udev->l2_ring_size, &udev->l2_ring_map, GFP_KERNEL | __GFP_COMP); @@ -1021,7 +1021,7 @@ static int __cnic_alloc_uio_rings(struct cnic_uio_dev *udev, int pages) return -ENOMEM; udev->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size; - udev->l2_buf_size = PAGE_ALIGN(udev->l2_buf_size); + udev->l2_buf_size = CNIC_PAGE_ALIGN(udev->l2_buf_size); udev->l2_buf = dma_alloc_coherent(&udev->pdev->dev, udev->l2_buf_size, &udev->l2_buf_map, GFP_KERNEL | __GFP_COMP); @@ -1102,7 +1102,7 @@ static int cnic_init_uio(struct cnic_dev *dev) uinfo->mem[0].size = MB_GET_CID_ADDR(TX_TSS_CID + TX_MAX_TSS_RINGS + 1); uinfo->mem[1].addr = (unsigned long) cp->status_blk.gen & - PAGE_MASK; + CNIC_PAGE_MASK; if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE * 9; else @@ -1113,7 +1113,7 @@ static int cnic_init_uio(struct cnic_dev *dev) uinfo->mem[0].size = pci_resource_len(dev->pcidev, 0); uinfo->mem[1].addr = (unsigned long) cp->bnx2x_def_status_blk & - PAGE_MASK; + CNIC_PAGE_MASK; 
uinfo->mem[1].size = sizeof(*cp->bnx2x_def_status_blk); uinfo->name = "bnx2x_cnic"; @@ -1267,14 +1267,14 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev) for (i = MAX_ISCSI_TBL_SZ; i < cp->max_cid_space; i++) cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_FCOE; - pages = PAGE_ALIGN(cp->max_cid_space * CNIC_KWQ16_DATA_SIZE) / - PAGE_SIZE; + pages = CNIC_PAGE_ALIGN(cp->max_cid_space * CNIC_KWQ16_DATA_SIZE) / + CNIC_PAGE_SIZE; ret = cnic_alloc_dma(dev, kwq_16_dma, pages, 0); if (ret) return -ENOMEM; - n = PAGE_SIZE / CNIC_KWQ16_DATA_SIZE; + n = CNIC_PAGE_SIZE / CNIC_KWQ16_DATA_SIZE; for (i = 0, j = 0; i < cp->max_cid_space; i++) { long off = CNIC_KWQ16_DATA_SIZE * (i % n); @@ -1296,7 +1296,7 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev) goto error; } - pages = PAGE_ALIGN(BNX2X_ISCSI_GLB_BUF_SIZE) / PAGE_SIZE; + pages = CNIC_PAGE_ALIGN(BNX2X_ISCSI_GLB_BUF_SIZE) / CNIC_PAGE_SIZE; ret = cnic_alloc_dma(dev, &cp->gbl_buf_info, pages, 0); if (ret) goto error; @@ -1466,8 +1466,8 @@ static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe) cp->r2tq_size = cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS * BNX2X_ISCSI_R2TQE_SIZE; cp->hq_size = cp->num_ccells * BNX2X_ISCSI_HQ_BD_SIZE; - pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE; - hq_bds = pages * (PAGE_SIZE / BNX2X_ISCSI_HQ_BD_SIZE); + pages = CNIC_PAGE_ALIGN(cp->hq_size) / CNIC_PAGE_SIZE; + hq_bds = pages * (CNIC_PAGE_SIZE / BNX2X_ISCSI_HQ_BD_SIZE); cp->num_cqs = req1->num_cqs; if (!dev->max_iscsi_conn) @@ -1477,9 +1477,9 @@ static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe) CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_RQ_SIZE_OFFSET(pfid), req1->rq_num_wqes); CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid), - PAGE_SIZE); + CNIC_PAGE_SIZE); CNIC_WR8(dev, BAR_TSTRORM_INTMEM + - TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT); + TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS); CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid), req1->num_tasks_per_conn); @@ -1489,9 +1489,9 @@ static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe) USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfid), req1->rq_buffer_size); CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_PAGE_SIZE_OFFSET(pfid), - PAGE_SIZE); + CNIC_PAGE_SIZE); CNIC_WR8(dev, BAR_USTRORM_INTMEM + - USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT); + USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS); CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid), req1->num_tasks_per_conn); @@ -1504,9 +1504,9 @@ static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe) /* init Xstorm RAM */ CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid), - PAGE_SIZE); + CNIC_PAGE_SIZE); CNIC_WR8(dev, BAR_XSTRORM_INTMEM + - XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT); + XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS); CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid), req1->num_tasks_per_conn); @@ -1519,9 +1519,9 @@ static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe) /* init Cstorm RAM */ CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid), - PAGE_SIZE); + CNIC_PAGE_SIZE); CNIC_WR8(dev, BAR_CSTRORM_INTMEM + - CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT); + CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS); CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid), req1->num_tasks_per_conn); @@ -1623,18 +1623,18 @@ 
static int cnic_alloc_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid) } ctx->cid = cid; - pages = PAGE_ALIGN(cp->task_array_size) / PAGE_SIZE; + pages = CNIC_PAGE_ALIGN(cp->task_array_size) / CNIC_PAGE_SIZE; ret = cnic_alloc_dma(dev, &iscsi->task_array_info, pages, 1); if (ret) goto error; - pages = PAGE_ALIGN(cp->r2tq_size) / PAGE_SIZE; + pages = CNIC_PAGE_ALIGN(cp->r2tq_size) / CNIC_PAGE_SIZE; ret = cnic_alloc_dma(dev, &iscsi->r2tq_info, pages, 1); if (ret) goto error; - pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE; + pages = CNIC_PAGE_ALIGN(cp->hq_size) / CNIC_PAGE_SIZE; ret = cnic_alloc_dma(dev, &iscsi->hq_info, pages, 1); if (ret) goto error; @@ -1760,7 +1760,7 @@ static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[], ictx->tstorm_st_context.iscsi.hdr_bytes_2_fetch = ISCSI_HEADER_SIZE; /* TSTORM requires the base address of RQ DB & not PTE */ ictx->tstorm_st_context.iscsi.rq_db_phy_addr.lo = - req2->rq_page_table_addr_lo & PAGE_MASK; + req2->rq_page_table_addr_lo & CNIC_PAGE_MASK; ictx->tstorm_st_context.iscsi.rq_db_phy_addr.hi = req2->rq_page_table_addr_hi; ictx->tstorm_st_context.iscsi.iscsi_conn_id = req1->iscsi_conn_id; @@ -1842,7 +1842,7 @@ static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[], /* CSTORM and USTORM initialization is different, CSTORM requires * CQ DB base & not PTE addr */ ictx->cstorm_st_context.cq_db_base.lo = - req1->cq_page_table_addr_lo & PAGE_MASK; + req1->cq_page_table_addr_lo & CNIC_PAGE_MASK; ictx->cstorm_st_context.cq_db_base.hi = req1->cq_page_table_addr_hi; ictx->cstorm_st_context.iscsi_conn_id = req1->iscsi_conn_id; ictx->cstorm_st_context.cq_proc_en_bit_map = (1 << cp->num_cqs) - 1; @@ -2911,7 +2911,7 @@ static int cnic_l2_completion(struct cnic_local *cp) u16 hw_cons, sw_cons; struct cnic_uio_dev *udev = cp->udev; union eth_rx_cqe *cqe, *cqe_ring = (union eth_rx_cqe *) - (udev->l2_ring + (2 * BNX2_PAGE_SIZE)); + (udev->l2_ring + (2 * CNIC_PAGE_SIZE)); u32 cmd; int comp = 0; @@ -3244,7 +3244,8 @@ static int cnic_copy_ulp_stats(struct cnic_dev *dev, int ulp_type) int rc; mutex_lock(&cnic_lock); - ulp_ops = cnic_ulp_tbl_prot(ulp_type); + ulp_ops = rcu_dereference_protected(cp->ulp_ops[ulp_type], + lockdep_is_held(&cnic_lock)); if (ulp_ops && ulp_ops->cnic_get_stats) rc = ulp_ops->cnic_get_stats(cp->ulp_handle[ulp_type]); else @@ -4384,7 +4385,7 @@ static int cnic_setup_5709_context(struct cnic_dev *dev, int valid) u32 idx = cp->ctx_arr[i].cid / cp->cids_per_blk; u32 val; - memset(cp->ctx_arr[i].ctx, 0, BNX2_PAGE_SIZE); + memset(cp->ctx_arr[i].ctx, 0, CNIC_PAGE_SIZE); CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA0, (cp->ctx_arr[i].mapping & 0xffffffff) | valid_bit); @@ -4628,7 +4629,7 @@ static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev) val = BNX2_L2CTX_L2_STATUSB_NUM(sb_id); cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_HOST_BDIDX, val); - rxbd = udev->l2_ring + BNX2_PAGE_SIZE; + rxbd = udev->l2_ring + CNIC_PAGE_SIZE; for (i = 0; i < BNX2_MAX_RX_DESC_CNT; i++, rxbd++) { dma_addr_t buf_map; int n = (i % cp->l2_rx_ring_size) + 1; @@ -4639,11 +4640,11 @@ static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev) rxbd->rx_bd_haddr_hi = (u64) buf_map >> 32; rxbd->rx_bd_haddr_lo = (u64) buf_map & 0xffffffff; } - val = (u64) (ring_map + BNX2_PAGE_SIZE) >> 32; + val = (u64) (ring_map + CNIC_PAGE_SIZE) >> 32; cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val); rxbd->rx_bd_haddr_hi = val; - val = (u64) (ring_map + BNX2_PAGE_SIZE) & 0xffffffff; + val = (u64) (ring_map + CNIC_PAGE_SIZE) & 0xffffffff; cnic_ctx_wr(dev, 
cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val); rxbd->rx_bd_haddr_lo = val; @@ -4709,10 +4710,10 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev) val = CNIC_RD(dev, BNX2_MQ_CONFIG); val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE; - if (BNX2_PAGE_BITS > 12) + if (CNIC_PAGE_BITS > 12) val |= (12 - 8) << 4; else - val |= (BNX2_PAGE_BITS - 8) << 4; + val |= (CNIC_PAGE_BITS - 8) << 4; CNIC_WR(dev, BNX2_MQ_CONFIG, val); @@ -4742,13 +4743,13 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev) /* Initialize the kernel work queue context. */ val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE | - (BNX2_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ; + (CNIC_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ; cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_TYPE, val); - val = (BNX2_PAGE_SIZE / sizeof(struct kwqe) - 1) << 16; + val = (CNIC_PAGE_SIZE / sizeof(struct kwqe) - 1) << 16; cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val); - val = ((BNX2_PAGE_SIZE / sizeof(struct kwqe)) << 16) | KWQ_PAGE_CNT; + val = ((CNIC_PAGE_SIZE / sizeof(struct kwqe)) << 16) | KWQ_PAGE_CNT; cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val); val = (u32) ((u64) cp->kwq_info.pgtbl_map >> 32); @@ -4768,13 +4769,13 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev) /* Initialize the kernel complete queue context. */ val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE | - (BNX2_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ; + (CNIC_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ; cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_TYPE, val); - val = (BNX2_PAGE_SIZE / sizeof(struct kcqe) - 1) << 16; + val = (CNIC_PAGE_SIZE / sizeof(struct kcqe) - 1) << 16; cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val); - val = ((BNX2_PAGE_SIZE / sizeof(struct kcqe)) << 16) | KCQ_PAGE_CNT; + val = ((CNIC_PAGE_SIZE / sizeof(struct kcqe)) << 16) | KCQ_PAGE_CNT; cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val); val = (u32) ((u64) cp->kcq1.dma.pgtbl_map >> 32); @@ -4918,7 +4919,7 @@ static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev, u32 cli = cp->ethdev->iscsi_l2_client_id; u32 val; - memset(txbd, 0, BNX2_PAGE_SIZE); + memset(txbd, 0, CNIC_PAGE_SIZE); buf_map = udev->l2_buf_map; for (i = 0; i < BNX2_MAX_TX_DESC_CNT; i += 3, txbd += 3) { @@ -4978,9 +4979,9 @@ static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev, struct bnx2x *bp = netdev_priv(dev->netdev); struct cnic_uio_dev *udev = cp->udev; struct eth_rx_bd *rxbd = (struct eth_rx_bd *) (udev->l2_ring + - BNX2_PAGE_SIZE); + CNIC_PAGE_SIZE); struct eth_rx_cqe_next_page *rxcqe = (struct eth_rx_cqe_next_page *) - (udev->l2_ring + (2 * BNX2_PAGE_SIZE)); + (udev->l2_ring + (2 * CNIC_PAGE_SIZE)); struct host_sp_status_block *sb = cp->bnx2x_def_status_blk; int i; u32 cli = cp->ethdev->iscsi_l2_client_id; @@ -5004,20 +5005,20 @@ static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev, rxbd->addr_lo = cpu_to_le32(buf_map & 0xffffffff); } - val = (u64) (ring_map + BNX2_PAGE_SIZE) >> 32; + val = (u64) (ring_map + CNIC_PAGE_SIZE) >> 32; rxbd->addr_hi = cpu_to_le32(val); data->rx.bd_page_base.hi = cpu_to_le32(val); - val = (u64) (ring_map + BNX2_PAGE_SIZE) & 0xffffffff; + val = (u64) (ring_map + CNIC_PAGE_SIZE) & 0xffffffff; rxbd->addr_lo = cpu_to_le32(val); data->rx.bd_page_base.lo = cpu_to_le32(val); rxcqe += BNX2X_MAX_RCQ_DESC_CNT; - val = (u64) (ring_map + (2 * BNX2_PAGE_SIZE)) >> 32; + val = (u64) (ring_map + (2 * CNIC_PAGE_SIZE)) >> 32; rxcqe->addr_hi = cpu_to_le32(val); data->rx.cqe_page_base.hi = cpu_to_le32(val); - val = (u64) (ring_map + (2 * BNX2_PAGE_SIZE)) & 0xffffffff; + val = (u64) 
(ring_map + (2 * CNIC_PAGE_SIZE)) & 0xffffffff; rxcqe->addr_lo = cpu_to_le32(val); data->rx.cqe_page_base.lo = cpu_to_le32(val); @@ -5265,8 +5266,8 @@ static void cnic_shutdown_rings(struct cnic_dev *dev) msleep(10); } clear_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags); - rx_ring = udev->l2_ring + BNX2_PAGE_SIZE; - memset(rx_ring, 0, BNX2_PAGE_SIZE); + rx_ring = udev->l2_ring + CNIC_PAGE_SIZE; + memset(rx_ring, 0, CNIC_PAGE_SIZE); } static int cnic_register_netdev(struct cnic_dev *dev) diff --git a/drivers/net/ethernet/broadcom/cnic.h b/drivers/net/ethernet/broadcom/cnic.h index 0d6b13f854d9..d535ae4228b4 100644 --- a/drivers/net/ethernet/broadcom/cnic.h +++ b/drivers/net/ethernet/broadcom/cnic.h @@ -1,6 +1,6 @@ /* cnic.h: Broadcom CNIC core network driver. * - * Copyright (c) 2006-2013 Broadcom Corporation + * Copyright (c) 2006-2014 Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by diff --git a/drivers/net/ethernet/broadcom/cnic_defs.h b/drivers/net/ethernet/broadcom/cnic_defs.h index 95a8e4b11c9f..dcbca6997e8f 100644 --- a/drivers/net/ethernet/broadcom/cnic_defs.h +++ b/drivers/net/ethernet/broadcom/cnic_defs.h @@ -1,7 +1,7 @@ /* cnic.c: Broadcom CNIC core network driver. * - * Copyright (c) 2006-2013 Broadcom Corporation + * Copyright (c) 2006-2014 Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by diff --git a/drivers/net/ethernet/broadcom/cnic_if.h b/drivers/net/ethernet/broadcom/cnic_if.h index 8cf6b1926069..5f4d5573a73d 100644 --- a/drivers/net/ethernet/broadcom/cnic_if.h +++ b/drivers/net/ethernet/broadcom/cnic_if.h @@ -1,6 +1,6 @@ /* cnic_if.h: Broadcom CNIC core network driver. * - * Copyright (c) 2006-2013 Broadcom Corporation + * Copyright (c) 2006-2014 Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -14,8 +14,8 @@ #include "bnx2x/bnx2x_mfw_req.h" -#define CNIC_MODULE_VERSION "2.5.19" -#define CNIC_MODULE_RELDATE "December 19, 2013" +#define CNIC_MODULE_VERSION "2.5.20" +#define CNIC_MODULE_RELDATE "March 14, 2014" #define CNIC_ULP_RDMA 0 #define CNIC_ULP_ISCSI 1 @@ -24,6 +24,16 @@ #define MAX_CNIC_ULP_TYPE_EXT 3 #define MAX_CNIC_ULP_TYPE 4 +/* Use CPU native page size up to 16K for cnic ring sizes. 
*/ +#if (PAGE_SHIFT > 14) +#define CNIC_PAGE_BITS 14 +#else +#define CNIC_PAGE_BITS PAGE_SHIFT +#endif +#define CNIC_PAGE_SIZE (1 << (CNIC_PAGE_BITS)) +#define CNIC_PAGE_ALIGN(addr) ALIGN(addr, CNIC_PAGE_SIZE) +#define CNIC_PAGE_MASK (~((CNIC_PAGE_SIZE) - 1)) + struct kwqe { u32 kwqe_op_flag; diff --git a/drivers/net/ethernet/broadcom/genet/Makefile b/drivers/net/ethernet/broadcom/genet/Makefile new file mode 100644 index 000000000000..31f55a90a197 --- /dev/null +++ b/drivers/net/ethernet/broadcom/genet/Makefile @@ -0,0 +1,2 @@ +obj-$(CONFIG_BCMGENET) += genet.o +genet-objs := bcmgenet.o bcmmii.o diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c new file mode 100644 index 000000000000..adf8acbddf56 --- /dev/null +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -0,0 +1,2584 @@ +/* + * Broadcom GENET (Gigabit Ethernet) controller driver + * + * Copyright (c) 2014 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +#define pr_fmt(fmt) "bcmgenet: " fmt + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/sched.h> +#include <linux/types.h> +#include <linux/fcntl.h> +#include <linux/interrupt.h> +#include <linux/string.h> +#include <linux/if_ether.h> +#include <linux/init.h> +#include <linux/errno.h> +#include <linux/delay.h> +#include <linux/platform_device.h> +#include <linux/dma-mapping.h> +#include <linux/pm.h> +#include <linux/clk.h> +#include <linux/version.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/of_net.h> +#include <linux/of_platform.h> +#include <net/arp.h> + +#include <linux/mii.h> +#include <linux/ethtool.h> +#include <linux/netdevice.h> +#include <linux/inetdevice.h> +#include <linux/etherdevice.h> +#include <linux/skbuff.h> +#include <linux/in.h> +#include <linux/ip.h> +#include <linux/ipv6.h> +#include <linux/phy.h> + +#include <asm/unaligned.h> + +#include "bcmgenet.h" + +/* Maximum number of hardware queues, downsized if needed */ +#define GENET_MAX_MQ_CNT 4 + +/* Default highest priority queue for multi queue support */ +#define GENET_Q0_PRIORITY 0 + +#define GENET_DEFAULT_BD_CNT \ + (TOTAL_DESC - priv->hw_params->tx_queues * priv->hw_params->bds_cnt) + +#define RX_BUF_LENGTH 2048 +#define SKB_ALIGNMENT 32 + +/* Tx/Rx DMA register offset, skip 256 descriptors */ +#define WORDS_PER_BD(p) (p->hw_params->words_per_bd) +#define DMA_DESC_SIZE (WORDS_PER_BD(priv) * sizeof(u32)) + +#define GENET_TDMA_REG_OFF (priv->hw_params->tdma_offset + \ + TOTAL_DESC * DMA_DESC_SIZE) + +#define GENET_RDMA_REG_OFF (priv->hw_params->rdma_offset + \ + TOTAL_DESC * DMA_DESC_SIZE) + +static inline void dmadesc_set_length_status(struct bcmgenet_priv *priv, + void __iomem *d, u32 value) +{ + __raw_writel(value, d + DMA_DESC_LENGTH_STATUS); +} + +static inline u32 dmadesc_get_length_status(struct bcmgenet_priv *priv, + void __iomem 
*d) +{ + return __raw_readl(d + DMA_DESC_LENGTH_STATUS); +} + +static inline void dmadesc_set_addr(struct bcmgenet_priv *priv, + void __iomem *d, + dma_addr_t addr) +{ + __raw_writel(lower_32_bits(addr), d + DMA_DESC_ADDRESS_LO); + + /* Register writes to GISB bus can take couple hundred nanoseconds + * and are done for each packet, save these expensive writes unless + * the platform is explicitely configured for 64-bits/LPAE. + */ +#ifdef CONFIG_PHYS_ADDR_T_64BIT + if (priv->hw_params->flags & GENET_HAS_40BITS) + __raw_writel(upper_32_bits(addr), d + DMA_DESC_ADDRESS_HI); +#endif +} + +/* Combined address + length/status setter */ +static inline void dmadesc_set(struct bcmgenet_priv *priv, + void __iomem *d, dma_addr_t addr, u32 val) +{ + dmadesc_set_length_status(priv, d, val); + dmadesc_set_addr(priv, d, addr); +} + +static inline dma_addr_t dmadesc_get_addr(struct bcmgenet_priv *priv, + void __iomem *d) +{ + dma_addr_t addr; + + addr = __raw_readl(d + DMA_DESC_ADDRESS_LO); + + /* Register writes to GISB bus can take couple hundred nanoseconds + * and are done for each packet, save these expensive writes unless + * the platform is explicitely configured for 64-bits/LPAE. + */ +#ifdef CONFIG_PHYS_ADDR_T_64BIT + if (priv->hw_params->flags & GENET_HAS_40BITS) + addr |= (u64)__raw_readl(d + DMA_DESC_ADDRESS_HI) << 32; +#endif + return addr; +} + +#define GENET_VER_FMT "%1d.%1d EPHY: 0x%04x" + +#define GENET_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | \ + NETIF_MSG_LINK) + +static inline u32 bcmgenet_rbuf_ctrl_get(struct bcmgenet_priv *priv) +{ + if (GENET_IS_V1(priv)) + return bcmgenet_rbuf_readl(priv, RBUF_FLUSH_CTRL_V1); + else + return bcmgenet_sys_readl(priv, SYS_RBUF_FLUSH_CTRL); +} + +static inline void bcmgenet_rbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val) +{ + if (GENET_IS_V1(priv)) + bcmgenet_rbuf_writel(priv, val, RBUF_FLUSH_CTRL_V1); + else + bcmgenet_sys_writel(priv, val, SYS_RBUF_FLUSH_CTRL); +} + +/* These macros are defined to deal with register map change + * between GENET1.1 and GENET2. Only those currently being used + * by driver are defined. 
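+ * On GENET v1 the TBUF registers have no dedicated block, so the
+ * helpers below fall back to the RBUF accessors (the TBUF_*_V1
+ * offsets), while v2 and later reach them through
+ * hw_params->tbuf_offset.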
+ */ +static inline u32 bcmgenet_tbuf_ctrl_get(struct bcmgenet_priv *priv) +{ + if (GENET_IS_V1(priv)) + return bcmgenet_rbuf_readl(priv, TBUF_CTRL_V1); + else + return __raw_readl(priv->base + + priv->hw_params->tbuf_offset + TBUF_CTRL); +} + +static inline void bcmgenet_tbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val) +{ + if (GENET_IS_V1(priv)) + bcmgenet_rbuf_writel(priv, val, TBUF_CTRL_V1); + else + __raw_writel(val, priv->base + + priv->hw_params->tbuf_offset + TBUF_CTRL); +} + +static inline u32 bcmgenet_bp_mc_get(struct bcmgenet_priv *priv) +{ + if (GENET_IS_V1(priv)) + return bcmgenet_rbuf_readl(priv, TBUF_BP_MC_V1); + else + return __raw_readl(priv->base + + priv->hw_params->tbuf_offset + TBUF_BP_MC); +} + +static inline void bcmgenet_bp_mc_set(struct bcmgenet_priv *priv, u32 val) +{ + if (GENET_IS_V1(priv)) + bcmgenet_rbuf_writel(priv, val, TBUF_BP_MC_V1); + else + __raw_writel(val, priv->base + + priv->hw_params->tbuf_offset + TBUF_BP_MC); +} + +/* RX/TX DMA register accessors */ +enum dma_reg { + DMA_RING_CFG = 0, + DMA_CTRL, + DMA_STATUS, + DMA_SCB_BURST_SIZE, + DMA_ARB_CTRL, + DMA_PRIORITY, + DMA_RING_PRIORITY, +}; + +static const u8 bcmgenet_dma_regs_v3plus[] = { + [DMA_RING_CFG] = 0x00, + [DMA_CTRL] = 0x04, + [DMA_STATUS] = 0x08, + [DMA_SCB_BURST_SIZE] = 0x0C, + [DMA_ARB_CTRL] = 0x2C, + [DMA_PRIORITY] = 0x30, + [DMA_RING_PRIORITY] = 0x38, +}; + +static const u8 bcmgenet_dma_regs_v2[] = { + [DMA_RING_CFG] = 0x00, + [DMA_CTRL] = 0x04, + [DMA_STATUS] = 0x08, + [DMA_SCB_BURST_SIZE] = 0x0C, + [DMA_ARB_CTRL] = 0x30, + [DMA_PRIORITY] = 0x34, + [DMA_RING_PRIORITY] = 0x3C, +}; + +static const u8 bcmgenet_dma_regs_v1[] = { + [DMA_CTRL] = 0x00, + [DMA_STATUS] = 0x04, + [DMA_SCB_BURST_SIZE] = 0x0C, + [DMA_ARB_CTRL] = 0x30, + [DMA_PRIORITY] = 0x34, + [DMA_RING_PRIORITY] = 0x3C, +}; + +/* Set at runtime once bcmgenet version is known */ +static const u8 *bcmgenet_dma_regs; + +static inline struct bcmgenet_priv *dev_to_priv(struct device *dev) +{ + return netdev_priv(dev_get_drvdata(dev)); +} + +static inline u32 bcmgenet_tdma_readl(struct bcmgenet_priv *priv, + enum dma_reg r) +{ + return __raw_readl(priv->base + GENET_TDMA_REG_OFF + + DMA_RINGS_SIZE + bcmgenet_dma_regs[r]); +} + +static inline void bcmgenet_tdma_writel(struct bcmgenet_priv *priv, + u32 val, enum dma_reg r) +{ + __raw_writel(val, priv->base + GENET_TDMA_REG_OFF + + DMA_RINGS_SIZE + bcmgenet_dma_regs[r]); +} + +static inline u32 bcmgenet_rdma_readl(struct bcmgenet_priv *priv, + enum dma_reg r) +{ + return __raw_readl(priv->base + GENET_RDMA_REG_OFF + + DMA_RINGS_SIZE + bcmgenet_dma_regs[r]); +} + +static inline void bcmgenet_rdma_writel(struct bcmgenet_priv *priv, + u32 val, enum dma_reg r) +{ + __raw_writel(val, priv->base + GENET_RDMA_REG_OFF + + DMA_RINGS_SIZE + bcmgenet_dma_regs[r]); +} + +/* RDMA/TDMA ring registers and accessors + * we merge the common fields and just prefix with T/D the registers + * having different meaning depending on the direction + */ +enum dma_ring_reg { + TDMA_READ_PTR = 0, + RDMA_WRITE_PTR = TDMA_READ_PTR, + TDMA_READ_PTR_HI, + RDMA_WRITE_PTR_HI = TDMA_READ_PTR_HI, + TDMA_CONS_INDEX, + RDMA_PROD_INDEX = TDMA_CONS_INDEX, + TDMA_PROD_INDEX, + RDMA_CONS_INDEX = TDMA_PROD_INDEX, + DMA_RING_BUF_SIZE, + DMA_START_ADDR, + DMA_START_ADDR_HI, + DMA_END_ADDR, + DMA_END_ADDR_HI, + DMA_MBUF_DONE_THRESH, + TDMA_FLOW_PERIOD, + RDMA_XON_XOFF_THRESH = TDMA_FLOW_PERIOD, + TDMA_WRITE_PTR, + RDMA_READ_PTR = TDMA_WRITE_PTR, + TDMA_WRITE_PTR_HI, + RDMA_READ_PTR_HI = TDMA_WRITE_PTR_HI +}; + +/* GENET v4 
supports 40-bits pointer addressing + * for obvious reasons the LO and HI word parts + * are contiguous, but this offsets the other + * registers. + */ +static const u8 genet_dma_ring_regs_v4[] = { + [TDMA_READ_PTR] = 0x00, + [TDMA_READ_PTR_HI] = 0x04, + [TDMA_CONS_INDEX] = 0x08, + [TDMA_PROD_INDEX] = 0x0C, + [DMA_RING_BUF_SIZE] = 0x10, + [DMA_START_ADDR] = 0x14, + [DMA_START_ADDR_HI] = 0x18, + [DMA_END_ADDR] = 0x1C, + [DMA_END_ADDR_HI] = 0x20, + [DMA_MBUF_DONE_THRESH] = 0x24, + [TDMA_FLOW_PERIOD] = 0x28, + [TDMA_WRITE_PTR] = 0x2C, + [TDMA_WRITE_PTR_HI] = 0x30, +}; + +static const u8 genet_dma_ring_regs_v123[] = { + [TDMA_READ_PTR] = 0x00, + [TDMA_CONS_INDEX] = 0x04, + [TDMA_PROD_INDEX] = 0x08, + [DMA_RING_BUF_SIZE] = 0x0C, + [DMA_START_ADDR] = 0x10, + [DMA_END_ADDR] = 0x14, + [DMA_MBUF_DONE_THRESH] = 0x18, + [TDMA_FLOW_PERIOD] = 0x1C, + [TDMA_WRITE_PTR] = 0x20, +}; + +/* Set at runtime once GENET version is known */ +static const u8 *genet_dma_ring_regs; + +static inline u32 bcmgenet_tdma_ring_readl(struct bcmgenet_priv *priv, + unsigned int ring, + enum dma_ring_reg r) +{ + return __raw_readl(priv->base + GENET_TDMA_REG_OFF + + (DMA_RING_SIZE * ring) + + genet_dma_ring_regs[r]); +} + +static inline void bcmgenet_tdma_ring_writel(struct bcmgenet_priv *priv, + unsigned int ring, + u32 val, + enum dma_ring_reg r) +{ + __raw_writel(val, priv->base + GENET_TDMA_REG_OFF + + (DMA_RING_SIZE * ring) + + genet_dma_ring_regs[r]); +} + +static inline u32 bcmgenet_rdma_ring_readl(struct bcmgenet_priv *priv, + unsigned int ring, + enum dma_ring_reg r) +{ + return __raw_readl(priv->base + GENET_RDMA_REG_OFF + + (DMA_RING_SIZE * ring) + + genet_dma_ring_regs[r]); +} + +static inline void bcmgenet_rdma_ring_writel(struct bcmgenet_priv *priv, + unsigned int ring, + u32 val, + enum dma_ring_reg r) +{ + __raw_writel(val, priv->base + GENET_RDMA_REG_OFF + + (DMA_RING_SIZE * ring) + + genet_dma_ring_regs[r]); +} + +static int bcmgenet_get_settings(struct net_device *dev, + struct ethtool_cmd *cmd) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + + if (!netif_running(dev)) + return -EINVAL; + + if (!priv->phydev) + return -ENODEV; + + return phy_ethtool_gset(priv->phydev, cmd); +} + +static int bcmgenet_set_settings(struct net_device *dev, + struct ethtool_cmd *cmd) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + + if (!netif_running(dev)) + return -EINVAL; + + if (!priv->phydev) + return -ENODEV; + + return phy_ethtool_sset(priv->phydev, cmd); +} + +static int bcmgenet_set_rx_csum(struct net_device *dev, + netdev_features_t wanted) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + u32 rbuf_chk_ctrl; + bool rx_csum_en; + + rx_csum_en = !!(wanted & NETIF_F_RXCSUM); + + rbuf_chk_ctrl = bcmgenet_rbuf_readl(priv, RBUF_CHK_CTRL); + + /* enable rx checksumming */ + if (rx_csum_en) + rbuf_chk_ctrl |= RBUF_RXCHK_EN; + else + rbuf_chk_ctrl &= ~RBUF_RXCHK_EN; + priv->desc_rxchk_en = rx_csum_en; + + /* If UniMAC forwards CRC, we need to skip over it to get + * a valid CHK bit to be set in the per-packet status word + */ + if (rx_csum_en && priv->crc_fwd_en) + rbuf_chk_ctrl |= RBUF_SKIP_FCS; + else + rbuf_chk_ctrl &= ~RBUF_SKIP_FCS; + + bcmgenet_rbuf_writel(priv, rbuf_chk_ctrl, RBUF_CHK_CTRL); + + return 0; +} + +static int bcmgenet_set_tx_csum(struct net_device *dev, + netdev_features_t wanted) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + bool desc_64b_en; + u32 tbuf_ctrl, rbuf_ctrl; + + tbuf_ctrl = bcmgenet_tbuf_ctrl_get(priv); + rbuf_ctrl = bcmgenet_rbuf_readl(priv, RBUF_CTRL); + + desc_64b_en = 
!!(wanted & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)); + + /* enable 64 bytes descriptor in both directions (RBUF and TBUF) */ + if (desc_64b_en) { + tbuf_ctrl |= RBUF_64B_EN; + rbuf_ctrl |= RBUF_64B_EN; + } else { + tbuf_ctrl &= ~RBUF_64B_EN; + rbuf_ctrl &= ~RBUF_64B_EN; + } + priv->desc_64b_en = desc_64b_en; + + bcmgenet_tbuf_ctrl_set(priv, tbuf_ctrl); + bcmgenet_rbuf_writel(priv, rbuf_ctrl, RBUF_CTRL); + + return 0; +} + +static int bcmgenet_set_features(struct net_device *dev, + netdev_features_t features) +{ + netdev_features_t changed = features ^ dev->features; + netdev_features_t wanted = dev->wanted_features; + int ret = 0; + + if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) + ret = bcmgenet_set_tx_csum(dev, wanted); + if (changed & (NETIF_F_RXCSUM)) + ret = bcmgenet_set_rx_csum(dev, wanted); + + return ret; +} + +static u32 bcmgenet_get_msglevel(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + + return priv->msg_enable; +} + +static void bcmgenet_set_msglevel(struct net_device *dev, u32 level) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + + priv->msg_enable = level; +} + +/* standard ethtool support functions. */ +enum bcmgenet_stat_type { + BCMGENET_STAT_NETDEV = -1, + BCMGENET_STAT_MIB_RX, + BCMGENET_STAT_MIB_TX, + BCMGENET_STAT_RUNT, + BCMGENET_STAT_MISC, +}; + +struct bcmgenet_stats { + char stat_string[ETH_GSTRING_LEN]; + int stat_sizeof; + int stat_offset; + enum bcmgenet_stat_type type; + /* reg offset from UMAC base for misc counters */ + u16 reg_offset; +}; + +#define STAT_NETDEV(m) { \ + .stat_string = __stringify(m), \ + .stat_sizeof = sizeof(((struct net_device_stats *)0)->m), \ + .stat_offset = offsetof(struct net_device_stats, m), \ + .type = BCMGENET_STAT_NETDEV, \ +} + +#define STAT_GENET_MIB(str, m, _type) { \ + .stat_string = str, \ + .stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \ + .stat_offset = offsetof(struct bcmgenet_priv, m), \ + .type = _type, \ +} + +#define STAT_GENET_MIB_RX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_RX) +#define STAT_GENET_MIB_TX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_TX) +#define STAT_GENET_RUNT(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_RUNT) + +#define STAT_GENET_MISC(str, m, offset) { \ + .stat_string = str, \ + .stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \ + .stat_offset = offsetof(struct bcmgenet_priv, m), \ + .type = BCMGENET_STAT_MISC, \ + .reg_offset = offset, \ +} + + +/* There is a 0xC gap between the end of RX and beginning of TX stats and then + * between the end of TX stats and the beginning of the RX RUNT + */ +#define BCMGENET_STAT_OFFSET 0xc + +/* Hardware counters must be kept in sync because the order/offset + * is important here (order in structure declaration = order in hardware) + */ +static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = { + /* general stats */ + STAT_NETDEV(rx_packets), + STAT_NETDEV(tx_packets), + STAT_NETDEV(rx_bytes), + STAT_NETDEV(tx_bytes), + STAT_NETDEV(rx_errors), + STAT_NETDEV(tx_errors), + STAT_NETDEV(rx_dropped), + STAT_NETDEV(tx_dropped), + STAT_NETDEV(multicast), + /* UniMAC RSV counters */ + STAT_GENET_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64), + STAT_GENET_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127), + STAT_GENET_MIB_RX("rx_128_255_oct", mib.rx.pkt_cnt.cnt_255), + STAT_GENET_MIB_RX("rx_256_511_oct", mib.rx.pkt_cnt.cnt_511), + STAT_GENET_MIB_RX("rx_512_1023_oct", mib.rx.pkt_cnt.cnt_1023), + STAT_GENET_MIB_RX("rx_1024_1518_oct", mib.rx.pkt_cnt.cnt_1518), + 
STAT_GENET_MIB_RX("rx_vlan_1519_1522_oct", mib.rx.pkt_cnt.cnt_mgv), + STAT_GENET_MIB_RX("rx_1522_2047_oct", mib.rx.pkt_cnt.cnt_2047), + STAT_GENET_MIB_RX("rx_2048_4095_oct", mib.rx.pkt_cnt.cnt_4095), + STAT_GENET_MIB_RX("rx_4096_9216_oct", mib.rx.pkt_cnt.cnt_9216), + STAT_GENET_MIB_RX("rx_pkts", mib.rx.pkt), + STAT_GENET_MIB_RX("rx_bytes", mib.rx.bytes), + STAT_GENET_MIB_RX("rx_multicast", mib.rx.mca), + STAT_GENET_MIB_RX("rx_broadcast", mib.rx.bca), + STAT_GENET_MIB_RX("rx_fcs", mib.rx.fcs), + STAT_GENET_MIB_RX("rx_control", mib.rx.cf), + STAT_GENET_MIB_RX("rx_pause", mib.rx.pf), + STAT_GENET_MIB_RX("rx_unknown", mib.rx.uo), + STAT_GENET_MIB_RX("rx_align", mib.rx.aln), + STAT_GENET_MIB_RX("rx_outrange", mib.rx.flr), + STAT_GENET_MIB_RX("rx_code", mib.rx.cde), + STAT_GENET_MIB_RX("rx_carrier", mib.rx.fcr), + STAT_GENET_MIB_RX("rx_oversize", mib.rx.ovr), + STAT_GENET_MIB_RX("rx_jabber", mib.rx.jbr), + STAT_GENET_MIB_RX("rx_mtu_err", mib.rx.mtue), + STAT_GENET_MIB_RX("rx_good_pkts", mib.rx.pok), + STAT_GENET_MIB_RX("rx_unicast", mib.rx.uc), + STAT_GENET_MIB_RX("rx_ppp", mib.rx.ppp), + STAT_GENET_MIB_RX("rx_crc", mib.rx.rcrc), + /* UniMAC TSV counters */ + STAT_GENET_MIB_TX("tx_64_octets", mib.tx.pkt_cnt.cnt_64), + STAT_GENET_MIB_TX("tx_65_127_oct", mib.tx.pkt_cnt.cnt_127), + STAT_GENET_MIB_TX("tx_128_255_oct", mib.tx.pkt_cnt.cnt_255), + STAT_GENET_MIB_TX("tx_256_511_oct", mib.tx.pkt_cnt.cnt_511), + STAT_GENET_MIB_TX("tx_512_1023_oct", mib.tx.pkt_cnt.cnt_1023), + STAT_GENET_MIB_TX("tx_1024_1518_oct", mib.tx.pkt_cnt.cnt_1518), + STAT_GENET_MIB_TX("tx_vlan_1519_1522_oct", mib.tx.pkt_cnt.cnt_mgv), + STAT_GENET_MIB_TX("tx_1522_2047_oct", mib.tx.pkt_cnt.cnt_2047), + STAT_GENET_MIB_TX("tx_2048_4095_oct", mib.tx.pkt_cnt.cnt_4095), + STAT_GENET_MIB_TX("tx_4096_9216_oct", mib.tx.pkt_cnt.cnt_9216), + STAT_GENET_MIB_TX("tx_pkts", mib.tx.pkts), + STAT_GENET_MIB_TX("tx_multicast", mib.tx.mca), + STAT_GENET_MIB_TX("tx_broadcast", mib.tx.bca), + STAT_GENET_MIB_TX("tx_pause", mib.tx.pf), + STAT_GENET_MIB_TX("tx_control", mib.tx.cf), + STAT_GENET_MIB_TX("tx_fcs_err", mib.tx.fcs), + STAT_GENET_MIB_TX("tx_oversize", mib.tx.ovr), + STAT_GENET_MIB_TX("tx_defer", mib.tx.drf), + STAT_GENET_MIB_TX("tx_excess_defer", mib.tx.edf), + STAT_GENET_MIB_TX("tx_single_col", mib.tx.scl), + STAT_GENET_MIB_TX("tx_multi_col", mib.tx.mcl), + STAT_GENET_MIB_TX("tx_late_col", mib.tx.lcl), + STAT_GENET_MIB_TX("tx_excess_col", mib.tx.ecl), + STAT_GENET_MIB_TX("tx_frags", mib.tx.frg), + STAT_GENET_MIB_TX("tx_total_col", mib.tx.ncl), + STAT_GENET_MIB_TX("tx_jabber", mib.tx.jbr), + STAT_GENET_MIB_TX("tx_bytes", mib.tx.bytes), + STAT_GENET_MIB_TX("tx_good_pkts", mib.tx.pok), + STAT_GENET_MIB_TX("tx_unicast", mib.tx.uc), + /* UniMAC RUNT counters */ + STAT_GENET_RUNT("rx_runt_pkts", mib.rx_runt_cnt), + STAT_GENET_RUNT("rx_runt_valid_fcs", mib.rx_runt_fcs), + STAT_GENET_RUNT("rx_runt_inval_fcs_align", mib.rx_runt_fcs_align), + STAT_GENET_RUNT("rx_runt_bytes", mib.rx_runt_bytes), + /* Misc UniMAC counters */ + STAT_GENET_MISC("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt, + UMAC_RBUF_OVFL_CNT), + STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt, UMAC_RBUF_ERR_CNT), + STAT_GENET_MISC("mdf_err_cnt", mib.mdf_err_cnt, UMAC_MDF_ERR_CNT), +}; + +#define BCMGENET_STATS_LEN ARRAY_SIZE(bcmgenet_gstrings_stats) + +static void bcmgenet_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + strlcpy(info->driver, "bcmgenet", sizeof(info->driver)); + strlcpy(info->version, "v2.0", sizeof(info->version)); + info->n_stats = BCMGENET_STATS_LEN; + 
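+	/* the ethtool core also fills this in from get_sset_count() */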
+} + +static int bcmgenet_get_sset_count(struct net_device *dev, int string_set) +{ + switch (string_set) { + case ETH_SS_STATS: + return BCMGENET_STATS_LEN; + default: + return -EOPNOTSUPP; + } +} + +static void bcmgenet_get_strings(struct net_device *dev, + u32 stringset, u8 *data) +{ + int i; + + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < BCMGENET_STATS_LEN; i++) { + memcpy(data + i * ETH_GSTRING_LEN, + bcmgenet_gstrings_stats[i].stat_string, + ETH_GSTRING_LEN); + } + break; + } +} + +static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv) +{ + int i, j = 0; + + for (i = 0; i < BCMGENET_STATS_LEN; i++) { + const struct bcmgenet_stats *s; + u8 offset = 0; + u32 val = 0; + char *p; + + s = &bcmgenet_gstrings_stats[i]; + switch (s->type) { + case BCMGENET_STAT_NETDEV: + continue; + case BCMGENET_STAT_MIB_RX: + case BCMGENET_STAT_MIB_TX: + case BCMGENET_STAT_RUNT: + if (s->type != BCMGENET_STAT_MIB_RX) + offset = BCMGENET_STAT_OFFSET; + val = bcmgenet_umac_readl(priv, UMAC_MIB_START + + j + offset); + break; + case BCMGENET_STAT_MISC: + val = bcmgenet_umac_readl(priv, s->reg_offset); + /* clear if overflowed */ + if (val == ~0) + bcmgenet_umac_writel(priv, 0, s->reg_offset); + break; + } + + j += s->stat_sizeof; + p = (char *)priv + s->stat_offset; + *(u32 *)p = val; + } +} + +static void bcmgenet_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, + u64 *data) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + int i; + + if (netif_running(dev)) + bcmgenet_update_mib_counters(priv); + + for (i = 0; i < BCMGENET_STATS_LEN; i++) { + const struct bcmgenet_stats *s; + char *p; + + s = &bcmgenet_gstrings_stats[i]; + if (s->type == BCMGENET_STAT_NETDEV) + p = (char *)&dev->stats; + else + p = (char *)priv; + p += s->stat_offset; + data[i] = *(u32 *)p; + } +} + +/* standard ethtool support functions. */ +static struct ethtool_ops bcmgenet_ethtool_ops = { + .get_strings = bcmgenet_get_strings, + .get_sset_count = bcmgenet_get_sset_count, + .get_ethtool_stats = bcmgenet_get_ethtool_stats, + .get_settings = bcmgenet_get_settings, + .set_settings = bcmgenet_set_settings, + .get_drvinfo = bcmgenet_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_msglevel = bcmgenet_get_msglevel, + .set_msglevel = bcmgenet_set_msglevel, +}; + +/* Power down the unimac, based on mode. */ +static void bcmgenet_power_down(struct bcmgenet_priv *priv, + enum bcmgenet_power_mode mode) +{ + u32 reg; + + switch (mode) { + case GENET_POWER_CABLE_SENSE: + phy_detach(priv->phydev); + break; + + case GENET_POWER_PASSIVE: + /* Power down LED */ + bcmgenet_mii_reset(priv->dev); + if (priv->hw_params->flags & GENET_HAS_EXT) { + reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT); + reg |= (EXT_PWR_DOWN_PHY | + EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS); + bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT); + } + break; + default: + break; + } +} + +static void bcmgenet_power_up(struct bcmgenet_priv *priv, + enum bcmgenet_power_mode mode) +{ + u32 reg; + + if (!(priv->hw_params->flags & GENET_HAS_EXT)) + return; + + reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT); + + switch (mode) { + case GENET_POWER_PASSIVE: + reg &= ~(EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_PHY | + EXT_PWR_DOWN_BIAS); + /* fallthrough */ + case GENET_POWER_CABLE_SENSE: + /* enable APD */ + reg |= EXT_PWR_DN_EN_LD; + break; + default: + break; + } + + bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT); + bcmgenet_mii_reset(priv->dev); +} + +/* ioctl handle special commands that are not present in ethtool. 
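+ * Only the MII ioctls (SIOCGMIIPHY, SIOCGMIIREG, SIOCSMIIREG) are
+ * forwarded to phy_mii_ioctl(); everything else returns -EINVAL.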
*/ +static int bcmgenet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + int val = 0; + + if (!netif_running(dev)) + return -EINVAL; + + switch (cmd) { + case SIOCGMIIPHY: + case SIOCGMIIREG: + case SIOCSMIIREG: + if (!priv->phydev) + val = -ENODEV; + else + val = phy_mii_ioctl(priv->phydev, rq, cmd); + break; + + default: + val = -EINVAL; + break; + } + + return val; +} + +static struct enet_cb *bcmgenet_get_txcb(struct bcmgenet_priv *priv, + struct bcmgenet_tx_ring *ring) +{ + struct enet_cb *tx_cb_ptr; + + tx_cb_ptr = ring->cbs; + tx_cb_ptr += ring->write_ptr - ring->cb_ptr; + tx_cb_ptr->bd_addr = priv->tx_bds + ring->write_ptr * DMA_DESC_SIZE; + /* Advancing local write pointer */ + if (ring->write_ptr == ring->end_ptr) + ring->write_ptr = ring->cb_ptr; + else + ring->write_ptr++; + + return tx_cb_ptr; +} + +/* Simple helper to free a control block's resources */ +static void bcmgenet_free_cb(struct enet_cb *cb) +{ + dev_kfree_skb_any(cb->skb); + cb->skb = NULL; + dma_unmap_addr_set(cb, dma_addr, 0); +} + +static inline void bcmgenet_tx_ring16_int_disable(struct bcmgenet_priv *priv, + struct bcmgenet_tx_ring *ring) +{ + bcmgenet_intrl2_0_writel(priv, + UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE, + INTRL2_CPU_MASK_SET); +} + +static inline void bcmgenet_tx_ring16_int_enable(struct bcmgenet_priv *priv, + struct bcmgenet_tx_ring *ring) +{ + bcmgenet_intrl2_0_writel(priv, + UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE, + INTRL2_CPU_MASK_CLEAR); +} + +static inline void bcmgenet_tx_ring_int_enable(struct bcmgenet_priv *priv, + struct bcmgenet_tx_ring *ring) +{ + bcmgenet_intrl2_1_writel(priv, + (1 << ring->index), INTRL2_CPU_MASK_CLEAR); + priv->int1_mask &= ~(1 << ring->index); +} + +static inline void bcmgenet_tx_ring_int_disable(struct bcmgenet_priv *priv, + struct bcmgenet_tx_ring *ring) +{ + bcmgenet_intrl2_1_writel(priv, + (1 << ring->index), INTRL2_CPU_MASK_SET); + priv->int1_mask |= (1 << ring->index); +} + +/* Unlocked version of the reclaim routine */ +static void __bcmgenet_tx_reclaim(struct net_device *dev, + struct bcmgenet_tx_ring *ring) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + int last_tx_cn, last_c_index, num_tx_bds; + struct enet_cb *tx_cb_ptr; + struct netdev_queue *txq; + unsigned int c_index; + + /* Compute how many buffers are transmited since last xmit call */ + c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX); + txq = netdev_get_tx_queue(dev, ring->queue); + + last_c_index = ring->c_index; + num_tx_bds = ring->size; + + c_index &= (num_tx_bds - 1); + + if (c_index >= last_c_index) + last_tx_cn = c_index - last_c_index; + else + last_tx_cn = num_tx_bds - last_c_index + c_index; + + netif_dbg(priv, tx_done, dev, + "%s ring=%d index=%d last_tx_cn=%d last_index=%d\n", + __func__, ring->index, + c_index, last_tx_cn, last_c_index); + + /* Reclaim transmitted buffers */ + while (last_tx_cn-- > 0) { + tx_cb_ptr = ring->cbs + last_c_index; + if (tx_cb_ptr->skb) { + dev->stats.tx_bytes += tx_cb_ptr->skb->len; + dma_unmap_single(&dev->dev, + dma_unmap_addr(tx_cb_ptr, dma_addr), + tx_cb_ptr->skb->len, + DMA_TO_DEVICE); + bcmgenet_free_cb(tx_cb_ptr); + } else if (dma_unmap_addr(tx_cb_ptr, dma_addr)) { + dev->stats.tx_bytes += + dma_unmap_len(tx_cb_ptr, dma_len); + dma_unmap_page(&dev->dev, + dma_unmap_addr(tx_cb_ptr, dma_addr), + dma_unmap_len(tx_cb_ptr, dma_len), + DMA_TO_DEVICE); + dma_unmap_addr_set(tx_cb_ptr, dma_addr, 0); + } + dev->stats.tx_packets++; + ring->free_bds += 1; + + 
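+		/* the ring size is a power of two, so the consumer
+		 * index wraps with a simple mask below
+		 */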
last_c_index++; + last_c_index &= (num_tx_bds - 1); + } + + if (ring->free_bds > (MAX_SKB_FRAGS + 1)) + ring->int_disable(priv, ring); + + if (netif_tx_queue_stopped(txq)) + netif_tx_wake_queue(txq); + + ring->c_index = c_index; +} + +static void bcmgenet_tx_reclaim(struct net_device *dev, + struct bcmgenet_tx_ring *ring) +{ + unsigned long flags; + + spin_lock_irqsave(&ring->lock, flags); + __bcmgenet_tx_reclaim(dev, ring); + spin_unlock_irqrestore(&ring->lock, flags); +} + +static void bcmgenet_tx_reclaim_all(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + int i; + + if (netif_is_multiqueue(dev)) { + for (i = 0; i < priv->hw_params->tx_queues; i++) + bcmgenet_tx_reclaim(dev, &priv->tx_rings[i]); + } + + bcmgenet_tx_reclaim(dev, &priv->tx_rings[DESC_INDEX]); +} + +/* Transmits a single SKB (either head of a fragment or a single SKB) + * caller must hold priv->lock + */ +static int bcmgenet_xmit_single(struct net_device *dev, + struct sk_buff *skb, + u16 dma_desc_flags, + struct bcmgenet_tx_ring *ring) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + struct device *kdev = &priv->pdev->dev; + struct enet_cb *tx_cb_ptr; + unsigned int skb_len; + dma_addr_t mapping; + u32 length_status; + int ret; + + tx_cb_ptr = bcmgenet_get_txcb(priv, ring); + + if (unlikely(!tx_cb_ptr)) + BUG(); + + tx_cb_ptr->skb = skb; + + skb_len = skb_headlen(skb) < ETH_ZLEN ? ETH_ZLEN : skb_headlen(skb); + + mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE); + ret = dma_mapping_error(kdev, mapping); + if (ret) { + netif_err(priv, tx_err, dev, "Tx DMA map failed\n"); + dev_kfree_skb(skb); + return ret; + } + + dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping); + dma_unmap_len_set(tx_cb_ptr, dma_len, skb->len); + length_status = (skb_len << DMA_BUFLENGTH_SHIFT) | dma_desc_flags | + (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT) | + DMA_TX_APPEND_CRC; + + if (skb->ip_summed == CHECKSUM_PARTIAL) + length_status |= DMA_TX_DO_CSUM; + + dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping, length_status); + + /* Decrement total BD count and advance our write pointer */ + ring->free_bds -= 1; + ring->prod_index += 1; + ring->prod_index &= DMA_P_INDEX_MASK; + + return 0; +} + +/* Transmit a SKB fragement */ +static int bcmgenet_xmit_frag(struct net_device *dev, + skb_frag_t *frag, + u16 dma_desc_flags, + struct bcmgenet_tx_ring *ring) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + struct device *kdev = &priv->pdev->dev; + struct enet_cb *tx_cb_ptr; + dma_addr_t mapping; + int ret; + + tx_cb_ptr = bcmgenet_get_txcb(priv, ring); + + if (unlikely(!tx_cb_ptr)) + BUG(); + tx_cb_ptr->skb = NULL; + + mapping = skb_frag_dma_map(kdev, frag, 0, + skb_frag_size(frag), DMA_TO_DEVICE); + ret = dma_mapping_error(kdev, mapping); + if (ret) { + netif_err(priv, tx_err, dev, "%s: Tx DMA map failed\n", + __func__); + return ret; + } + + dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping); + dma_unmap_len_set(tx_cb_ptr, dma_len, frag->size); + + dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping, + (frag->size << DMA_BUFLENGTH_SHIFT) | dma_desc_flags | + (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT)); + + + ring->free_bds -= 1; + ring->prod_index += 1; + ring->prod_index &= DMA_P_INDEX_MASK; + + return 0; +} + +/* Reallocate the SKB to put enough headroom in front of it and insert + * the transmit checksum offsets in the descriptors + */ +static int bcmgenet_put_tx_csum(struct net_device *dev, struct sk_buff *skb) +{ + struct status_64 *status = NULL; + struct sk_buff *new_skb; + u16 offset; + u8 
ip_proto; + u16 ip_ver; + u32 tx_csum_info; + + if (unlikely(skb_headroom(skb) < sizeof(*status))) { + /* If 64 byte status block enabled, must make sure skb has + * enough headroom for us to insert 64B status block. + */ + new_skb = skb_realloc_headroom(skb, sizeof(*status)); + dev_kfree_skb(skb); + if (!new_skb) { + dev->stats.tx_errors++; + dev->stats.tx_dropped++; + return -ENOMEM; + } + skb = new_skb; + } + + skb_push(skb, sizeof(*status)); + status = (struct status_64 *)skb->data; + + if (skb->ip_summed == CHECKSUM_PARTIAL) { + ip_ver = htons(skb->protocol); + switch (ip_ver) { + case ETH_P_IP: + ip_proto = ip_hdr(skb)->protocol; + break; + case ETH_P_IPV6: + ip_proto = ipv6_hdr(skb)->nexthdr; + break; + default: + return 0; + } + + offset = skb_checksum_start_offset(skb) - sizeof(*status); + tx_csum_info = (offset << STATUS_TX_CSUM_START_SHIFT) | + (offset + skb->csum_offset); + + /* Set the length valid bit for TCP and UDP and just set + * the special UDP flag for IPv4, else just set to 0. + */ + if (ip_proto == IPPROTO_TCP || ip_proto == IPPROTO_UDP) { + tx_csum_info |= STATUS_TX_CSUM_LV; + if (ip_proto == IPPROTO_UDP && ip_ver == ETH_P_IP) + tx_csum_info |= STATUS_TX_CSUM_PROTO_UDP; + } else + tx_csum_info = 0; + + status->tx_csum_info = tx_csum_info; + } + + return 0; +} + +static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + struct bcmgenet_tx_ring *ring = NULL; + struct netdev_queue *txq; + unsigned long flags = 0; + int nr_frags, index; + u16 dma_desc_flags; + int ret; + int i; + + index = skb_get_queue_mapping(skb); + /* Mapping strategy: + * queue_mapping = 0, unclassified, packet xmited through ring16 + * queue_mapping = 1, goes to ring 0. (highest priority queue + * queue_mapping = 2, goes to ring 1. + * queue_mapping = 3, goes to ring 2. + * queue_mapping = 4, goes to ring 3. + */ + if (index == 0) + index = DESC_INDEX; + else + index -= 1; + + nr_frags = skb_shinfo(skb)->nr_frags; + ring = &priv->tx_rings[index]; + txq = netdev_get_tx_queue(dev, ring->queue); + + spin_lock_irqsave(&ring->lock, flags); + if (ring->free_bds <= nr_frags + 1) { + netif_tx_stop_queue(txq); + netdev_err(dev, "%s: tx ring %d full when queue %d awake\n", + __func__, index, ring->queue); + ret = NETDEV_TX_BUSY; + goto out; + } + + /* set the SKB transmit checksum */ + if (priv->desc_64b_en) { + ret = bcmgenet_put_tx_csum(dev, skb); + if (ret) { + ret = NETDEV_TX_OK; + goto out; + } + } + + dma_desc_flags = DMA_SOP; + if (nr_frags == 0) + dma_desc_flags |= DMA_EOP; + + /* Transmit single SKB or head of fragment list */ + ret = bcmgenet_xmit_single(dev, skb, dma_desc_flags, ring); + if (ret) { + ret = NETDEV_TX_OK; + goto out; + } + + /* xmit fragment */ + for (i = 0; i < nr_frags; i++) { + ret = bcmgenet_xmit_frag(dev, + &skb_shinfo(skb)->frags[i], + (i == nr_frags - 1) ? 
DMA_EOP : 0, ring); + if (ret) { + ret = NETDEV_TX_OK; + goto out; + } + } + + skb_tx_timestamp(skb); + + /* we kept a software copy of how much we should advance the TDMA + * producer index, now write it down to the hardware + */ + bcmgenet_tdma_ring_writel(priv, ring->index, + ring->prod_index, TDMA_PROD_INDEX); + + if (ring->free_bds <= (MAX_SKB_FRAGS + 1)) { + netif_tx_stop_queue(txq); + ring->int_enable(priv, ring); + } + +out: + spin_unlock_irqrestore(&ring->lock, flags); + + return ret; +} + + +static int bcmgenet_rx_refill(struct bcmgenet_priv *priv, + struct enet_cb *cb) +{ + struct device *kdev = &priv->pdev->dev; + struct sk_buff *skb; + dma_addr_t mapping; + int ret; + + skb = netdev_alloc_skb(priv->dev, + priv->rx_buf_len + SKB_ALIGNMENT); + if (!skb) + return -ENOMEM; + + /* a caller did not release this control block */ + WARN_ON(cb->skb != NULL); + cb->skb = skb; + mapping = dma_map_single(kdev, skb->data, + priv->rx_buf_len, DMA_FROM_DEVICE); + ret = dma_mapping_error(kdev, mapping); + if (ret) { + bcmgenet_free_cb(cb); + netif_err(priv, rx_err, priv->dev, + "%s DMA map failed\n", __func__); + return ret; + } + + dma_unmap_addr_set(cb, dma_addr, mapping); + /* assign packet, prepare descriptor, and advance pointer */ + + dmadesc_set_addr(priv, priv->rx_bd_assign_ptr, mapping); + + /* turn on the newly assigned BD for DMA to use */ + priv->rx_bd_assign_index++; + priv->rx_bd_assign_index &= (priv->num_rx_bds - 1); + + priv->rx_bd_assign_ptr = priv->rx_bds + + (priv->rx_bd_assign_index * DMA_DESC_SIZE); + + return 0; +} + +/* bcmgenet_desc_rx - descriptor based rx process. + * this could be called from bottom half, or from NAPI polling method. + */ +static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv, + unsigned int budget) +{ + struct net_device *dev = priv->dev; + struct enet_cb *cb; + struct sk_buff *skb; + u32 dma_length_status; + unsigned long dma_flag; + int len, err; + unsigned int rxpktprocessed = 0, rxpkttoprocess; + unsigned int p_index; + unsigned int chksum_ok = 0; + + p_index = bcmgenet_rdma_ring_readl(priv, + DESC_INDEX, RDMA_PROD_INDEX); + p_index &= DMA_P_INDEX_MASK; + + if (p_index < priv->rx_c_index) + rxpkttoprocess = (DMA_C_INDEX_MASK + 1) - + priv->rx_c_index + p_index; + else + rxpkttoprocess = p_index - priv->rx_c_index; + + netif_dbg(priv, rx_status, dev, + "RDMA: rxpkttoprocess=%d\n", rxpkttoprocess); + + while ((rxpktprocessed < rxpkttoprocess) && + (rxpktprocessed < budget)) { + + /* Unmap the packet contents such that we can use the + * RSV from the 64 bytes descriptor when enabled and save + * a 32-bits register read + */ + cb = &priv->rx_cbs[priv->rx_read_ptr]; + skb = cb->skb; + dma_unmap_single(&dev->dev, dma_unmap_addr(cb, dma_addr), + priv->rx_buf_len, DMA_FROM_DEVICE); + + if (!priv->desc_64b_en) { + dma_length_status = dmadesc_get_length_status(priv, + priv->rx_bds + + (priv->rx_read_ptr * + DMA_DESC_SIZE)); + } else { + struct status_64 *status; + status = (struct status_64 *)skb->data; + dma_length_status = status->length_status; + } + + /* DMA flags and length are still valid no matter how + * we got the Receive Status Vector (64B RSB or register) + */ + dma_flag = dma_length_status & 0xffff; + len = dma_length_status >> DMA_BUFLENGTH_SHIFT; + + netif_dbg(priv, rx_status, dev, + "%s: p_ind=%d c_ind=%d read_ptr=%d len_stat=0x%08x\n", + __func__, p_index, priv->rx_c_index, priv->rx_read_ptr, + dma_length_status); + + rxpktprocessed++; + + priv->rx_read_ptr++; + priv->rx_read_ptr &= (priv->num_rx_bds - 1); + + /* out of memory, 
just drop packets at the hardware level */
+		if (unlikely(!skb)) {
+			dev->stats.rx_dropped++;
+			dev->stats.rx_errors++;
+			goto refill;
+		}
+
+		if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) {
+			netif_err(priv, rx_status, dev,
+				"Dropping fragmented packet!\n");
+			dev->stats.rx_dropped++;
+			dev->stats.rx_errors++;
+			dev_kfree_skb_any(cb->skb);
+			cb->skb = NULL;
+			goto refill;
+		}
+		/* report errors */
+		if (unlikely(dma_flag & (DMA_RX_CRC_ERROR |
+						DMA_RX_OV |
+						DMA_RX_NO |
+						DMA_RX_LG |
+						DMA_RX_RXER))) {
+			netif_err(priv, rx_status, dev, "dma_flag=0x%x\n",
+				(unsigned int)dma_flag);
+			if (dma_flag & DMA_RX_CRC_ERROR)
+				dev->stats.rx_crc_errors++;
+			if (dma_flag & DMA_RX_OV)
+				dev->stats.rx_over_errors++;
+			if (dma_flag & DMA_RX_NO)
+				dev->stats.rx_frame_errors++;
+			if (dma_flag & DMA_RX_LG)
+				dev->stats.rx_length_errors++;
+			dev->stats.rx_dropped++;
+			dev->stats.rx_errors++;
+
+			/* discard the packet and advance consumer index. */
+			dev_kfree_skb_any(cb->skb);
+			cb->skb = NULL;
+			goto refill;
+		} /* error packet */
+
+		chksum_ok = (dma_flag & priv->dma_rx_chk_bit) &&
+				priv->desc_rxchk_en;
+
+		skb_put(skb, len);
+		if (priv->desc_64b_en) {
+			/* strip off the 64-byte hardware status block */
+			skb_pull(skb, 64);
+			len -= 64;
+		}
+
+		if (likely(chksum_ok))
+			skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+		/* remove the 2 bytes the hardware added for IP alignment */
+		skb_pull(skb, 2);
+		len -= 2;
+
+		if (priv->crc_fwd_en) {
+			skb_trim(skb, len - ETH_FCS_LEN);
+			len -= ETH_FCS_LEN;
+		}
+
+		/* Finish setting up the received SKB and send it to the kernel */
+		skb->protocol = eth_type_trans(skb, priv->dev);
+		dev->stats.rx_packets++;
+		dev->stats.rx_bytes += len;
+		if (dma_flag & DMA_RX_MULT)
+			dev->stats.multicast++;
+
+		/* Notify kernel */
+		napi_gro_receive(&priv->napi, skb);
+		cb->skb = NULL;
+		netif_dbg(priv, rx_status, dev, "pushed up to kernel\n");
+
+		/* refill RX path on the current control block */
+refill:
+		err = bcmgenet_rx_refill(priv, cb);
+		if (err)
+			netif_err(priv, rx_err, dev, "Rx refill failed\n");
+	}
+
+	return rxpktprocessed;
+}
+
+/* Assign skb to RX DMA descriptor.
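+ * Called once at ring init time; gives each control block that does
+ * not yet own an skb a freshly allocated RX_BUF_LENGTH (2 KB) buffer
+ * through bcmgenet_rx_refill().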
*/ +static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv) +{ + struct enet_cb *cb; + int ret = 0; + int i; + + netif_dbg(priv, hw, priv->dev, "%s:\n", __func__); + + /* loop here for each buffer needing assign */ + for (i = 0; i < priv->num_rx_bds; i++) { + cb = &priv->rx_cbs[priv->rx_bd_assign_index]; + if (cb->skb) + continue; + + /* set the DMA descriptor length once and for all + * it will only change if we support dynamically sizing + * priv->rx_buf_len, but we do not + */ + dmadesc_set_length_status(priv, priv->rx_bd_assign_ptr, + priv->rx_buf_len << DMA_BUFLENGTH_SHIFT); + + ret = bcmgenet_rx_refill(priv, cb); + if (ret) + break; + + } + + return ret; +} + +static void bcmgenet_free_rx_buffers(struct bcmgenet_priv *priv) +{ + struct enet_cb *cb; + int i; + + for (i = 0; i < priv->num_rx_bds; i++) { + cb = &priv->rx_cbs[i]; + + if (dma_unmap_addr(cb, dma_addr)) { + dma_unmap_single(&priv->dev->dev, + dma_unmap_addr(cb, dma_addr), + priv->rx_buf_len, DMA_FROM_DEVICE); + dma_unmap_addr_set(cb, dma_addr, 0); + } + + if (cb->skb) + bcmgenet_free_cb(cb); + } +} + +static int reset_umac(struct bcmgenet_priv *priv) +{ + struct device *kdev = &priv->pdev->dev; + unsigned int timeout = 0; + u32 reg; + + /* 7358a0/7552a0: bad default in RBUF_FLUSH_CTRL.umac_sw_rst */ + bcmgenet_rbuf_ctrl_set(priv, 0); + udelay(10); + + /* disable MAC while updating its registers */ + bcmgenet_umac_writel(priv, 0, UMAC_CMD); + + /* issue soft reset, wait for it to complete */ + bcmgenet_umac_writel(priv, CMD_SW_RESET, UMAC_CMD); + while (timeout++ < 1000) { + reg = bcmgenet_umac_readl(priv, UMAC_CMD); + if (!(reg & CMD_SW_RESET)) + return 0; + + udelay(1); + } + + if (timeout == 1000) { + dev_err(kdev, + "timeout waiting for MAC to come out of resetn\n"); + return -ETIMEDOUT; + } + + return 0; +} + +static int init_umac(struct bcmgenet_priv *priv) +{ + struct device *kdev = &priv->pdev->dev; + int ret; + u32 reg, cpu_mask_clear; + + dev_dbg(&priv->pdev->dev, "bcmgenet: init_umac\n"); + + ret = reset_umac(priv); + if (ret) + return ret; + + bcmgenet_umac_writel(priv, 0, UMAC_CMD); + /* clear tx/rx counter */ + bcmgenet_umac_writel(priv, + MIB_RESET_RX | MIB_RESET_TX | MIB_RESET_RUNT, UMAC_MIB_CTRL); + bcmgenet_umac_writel(priv, 0, UMAC_MIB_CTRL); + + bcmgenet_umac_writel(priv, ENET_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN); + + /* init rx registers, enable ip header optimization */ + reg = bcmgenet_rbuf_readl(priv, RBUF_CTRL); + reg |= RBUF_ALIGN_2B; + bcmgenet_rbuf_writel(priv, reg, RBUF_CTRL); + + if (!GENET_IS_V1(priv) && !GENET_IS_V2(priv)) + bcmgenet_rbuf_writel(priv, 1, RBUF_TBUF_SIZE_CTRL); + + /* Mask all interrupts.*/ + bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET); + bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR); + bcmgenet_intrl2_0_writel(priv, 0, INTRL2_CPU_MASK_CLEAR); + + cpu_mask_clear = UMAC_IRQ_RXDMA_BDONE; + + dev_dbg(kdev, "%s:Enabling RXDMA_BDONE interrupt\n", __func__); + + /* Monitor cable plug/unpluged event for internal PHY */ + if (phy_is_internal(priv->phydev)) + cpu_mask_clear |= (UMAC_IRQ_LINK_DOWN | UMAC_IRQ_LINK_UP); + else if (priv->ext_phy) + cpu_mask_clear |= (UMAC_IRQ_LINK_DOWN | UMAC_IRQ_LINK_UP); + else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) { + reg = bcmgenet_bp_mc_get(priv); + reg |= BIT(priv->hw_params->bp_in_en_shift); + + /* bp_mask: back pressure mask */ + if (netif_is_multiqueue(priv->dev)) + reg |= priv->hw_params->bp_in_mask; + else + reg &= ~priv->hw_params->bp_in_mask; + bcmgenet_bp_mc_set(priv, reg); + } + + /* 
Enable MDIO interrupts on GENET v3+ */ + if (priv->hw_params->flags & GENET_HAS_MDIO_INTR) + cpu_mask_clear |= UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR; + + bcmgenet_intrl2_0_writel(priv, cpu_mask_clear, + INTRL2_CPU_MASK_CLEAR); + + /* Enable rx/tx engine.*/ + dev_dbg(kdev, "done init umac\n"); + + return 0; +} + +/* Initialize all house-keeping variables for a TX ring, along + * with corresponding hardware registers + */ +static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv, + unsigned int index, unsigned int size, + unsigned int write_ptr, unsigned int end_ptr) +{ + struct bcmgenet_tx_ring *ring = &priv->tx_rings[index]; + u32 words_per_bd = WORDS_PER_BD(priv); + u32 flow_period_val = 0; + unsigned int first_bd; + + spin_lock_init(&ring->lock); + ring->index = index; + if (index == DESC_INDEX) { + ring->queue = 0; + ring->int_enable = bcmgenet_tx_ring16_int_enable; + ring->int_disable = bcmgenet_tx_ring16_int_disable; + } else { + ring->queue = index + 1; + ring->int_enable = bcmgenet_tx_ring_int_enable; + ring->int_disable = bcmgenet_tx_ring_int_disable; + } + ring->cbs = priv->tx_cbs + write_ptr; + ring->size = size; + ring->c_index = 0; + ring->free_bds = size; + ring->write_ptr = write_ptr; + ring->cb_ptr = write_ptr; + ring->end_ptr = end_ptr - 1; + ring->prod_index = 0; + + /* Set flow period for ring != 16 */ + if (index != DESC_INDEX) + flow_period_val = ENET_MAX_MTU_SIZE << 16; + + bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_PROD_INDEX); + bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_CONS_INDEX); + bcmgenet_tdma_ring_writel(priv, index, 1, DMA_MBUF_DONE_THRESH); + /* Disable rate control for now */ + bcmgenet_tdma_ring_writel(priv, index, flow_period_val, + TDMA_FLOW_PERIOD); + /* Unclassified traffic goes to ring 16 */ + bcmgenet_tdma_ring_writel(priv, index, + ((size << DMA_RING_SIZE_SHIFT) | RX_BUF_LENGTH), + DMA_RING_BUF_SIZE); + + first_bd = write_ptr; + + /* Set start and end address, read and write pointers */ + bcmgenet_tdma_ring_writel(priv, index, first_bd * words_per_bd, + DMA_START_ADDR); + bcmgenet_tdma_ring_writel(priv, index, first_bd * words_per_bd, + TDMA_READ_PTR); + bcmgenet_tdma_ring_writel(priv, index, first_bd, + TDMA_WRITE_PTR); + bcmgenet_tdma_ring_writel(priv, index, end_ptr * words_per_bd - 1, + DMA_END_ADDR); +} + +/* Initialize a RDMA ring */ +static int bcmgenet_init_rx_ring(struct bcmgenet_priv *priv, + unsigned int index, unsigned int size) +{ + u32 words_per_bd = WORDS_PER_BD(priv); + int ret; + + priv->num_rx_bds = TOTAL_DESC; + priv->rx_bds = priv->base + priv->hw_params->rdma_offset; + priv->rx_bd_assign_ptr = priv->rx_bds; + priv->rx_bd_assign_index = 0; + priv->rx_c_index = 0; + priv->rx_read_ptr = 0; + priv->rx_cbs = kzalloc(priv->num_rx_bds * sizeof(struct enet_cb), + GFP_KERNEL); + if (!priv->rx_cbs) + return -ENOMEM; + + ret = bcmgenet_alloc_rx_buffers(priv); + if (ret) { + kfree(priv->rx_cbs); + return ret; + } + + bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_WRITE_PTR); + bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_PROD_INDEX); + bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_CONS_INDEX); + bcmgenet_rdma_ring_writel(priv, index, + ((size << DMA_RING_SIZE_SHIFT) | RX_BUF_LENGTH), + DMA_RING_BUF_SIZE); + bcmgenet_rdma_ring_writel(priv, index, 0, DMA_START_ADDR); + bcmgenet_rdma_ring_writel(priv, index, + words_per_bd * size - 1, DMA_END_ADDR); + bcmgenet_rdma_ring_writel(priv, index, + (DMA_FC_THRESH_LO << DMA_XOFF_THRESHOLD_SHIFT) | + DMA_FC_THRESH_HI, RDMA_XON_XOFF_THRESH); + bcmgenet_rdma_ring_writel(priv, index, 0, 
RDMA_READ_PTR); + + return ret; +} + +/* init multi xmit queues, only available for GENET2+ + * the queue is partitioned as follows: + * + * queue 0 - 3 is priority based, each one has 32 descriptors, + * with queue 0 being the highest priority queue. + * + * queue 16 is the default tx queue with GENET_DEFAULT_BD_CNT + * descriptors: 256 - (number of tx queues * bds per queues) = 128 + * descriptors. + * + * The transmit control block pool is then partitioned as following: + * - tx_cbs[0...127] are for queue 16 + * - tx_ring_cbs[0] points to tx_cbs[128..159] + * - tx_ring_cbs[1] points to tx_cbs[160..191] + * - tx_ring_cbs[2] points to tx_cbs[192..223] + * - tx_ring_cbs[3] points to tx_cbs[224..255] + */ +static void bcmgenet_init_multiq(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + unsigned int i, dma_enable; + u32 reg, dma_ctrl, ring_cfg = 0, dma_priority = 0; + + if (!netif_is_multiqueue(dev)) { + netdev_warn(dev, "called with non multi queue aware HW\n"); + return; + } + + dma_ctrl = bcmgenet_tdma_readl(priv, DMA_CTRL); + dma_enable = dma_ctrl & DMA_EN; + dma_ctrl &= ~DMA_EN; + bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL); + + /* Enable strict priority arbiter mode */ + bcmgenet_tdma_writel(priv, DMA_ARBITER_SP, DMA_ARB_CTRL); + + for (i = 0; i < priv->hw_params->tx_queues; i++) { + /* first 64 tx_cbs are reserved for default tx queue + * (ring 16) + */ + bcmgenet_init_tx_ring(priv, i, priv->hw_params->bds_cnt, + i * priv->hw_params->bds_cnt, + (i + 1) * priv->hw_params->bds_cnt); + + /* Configure ring as decriptor ring and setup priority */ + ring_cfg |= 1 << i; + dma_priority |= ((GENET_Q0_PRIORITY + i) << + (GENET_MAX_MQ_CNT + 1) * i); + dma_ctrl |= 1 << (i + DMA_RING_BUF_EN_SHIFT); + } + + /* Enable rings */ + reg = bcmgenet_tdma_readl(priv, DMA_RING_CFG); + reg |= ring_cfg; + bcmgenet_tdma_writel(priv, reg, DMA_RING_CFG); + + /* Use configured rings priority and set ring #16 priority */ + reg = bcmgenet_tdma_readl(priv, DMA_RING_PRIORITY); + reg |= ((GENET_Q0_PRIORITY + priv->hw_params->tx_queues) << 20); + reg |= dma_priority; + bcmgenet_tdma_writel(priv, reg, DMA_PRIORITY); + + /* Configure ring as descriptor ring and re-enable DMA if enabled */ + reg = bcmgenet_tdma_readl(priv, DMA_CTRL); + reg |= dma_ctrl; + if (dma_enable) + reg |= DMA_EN; + bcmgenet_tdma_writel(priv, reg, DMA_CTRL); +} + +static void bcmgenet_fini_dma(struct bcmgenet_priv *priv) +{ + int i; + + /* disable DMA */ + bcmgenet_rdma_writel(priv, 0, DMA_CTRL); + bcmgenet_tdma_writel(priv, 0, DMA_CTRL); + + for (i = 0; i < priv->num_tx_bds; i++) { + if (priv->tx_cbs[i].skb != NULL) { + dev_kfree_skb(priv->tx_cbs[i].skb); + priv->tx_cbs[i].skb = NULL; + } + } + + bcmgenet_free_rx_buffers(priv); + kfree(priv->rx_cbs); + kfree(priv->tx_cbs); +} + +/* init_edma: Initialize DMA control register */ +static int bcmgenet_init_dma(struct bcmgenet_priv *priv) +{ + int ret; + + netif_dbg(priv, hw, priv->dev, "bcmgenet: init_edma\n"); + + /* by default, enable ring 16 (descriptor based) */ + ret = bcmgenet_init_rx_ring(priv, DESC_INDEX, TOTAL_DESC); + if (ret) { + netdev_err(priv->dev, "failed to initialize RX ring\n"); + return ret; + } + + /* init rDma */ + bcmgenet_rdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE); + + /* Init tDma */ + bcmgenet_tdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE); + + /* Initialize commont TX ring structures */ + priv->tx_bds = priv->base + priv->hw_params->tdma_offset; + priv->num_tx_bds = TOTAL_DESC; + priv->tx_cbs = 
kzalloc(priv->num_tx_bds * sizeof(struct enet_cb),
+		GFP_KERNEL);
+	if (!priv->tx_cbs) {
+		bcmgenet_fini_dma(priv);
+		return -ENOMEM;
+	}
+
+	/* initialize multi xmit queue */
+	bcmgenet_init_multiq(priv->dev);
+
+	/* initialize special ring 16 */
+	bcmgenet_init_tx_ring(priv, DESC_INDEX, GENET_DEFAULT_BD_CNT,
+			priv->hw_params->tx_queues * priv->hw_params->bds_cnt,
+			TOTAL_DESC);
+
+	return 0;
+}
+
+/* NAPI polling method */
+static int bcmgenet_poll(struct napi_struct *napi, int budget)
+{
+	struct bcmgenet_priv *priv = container_of(napi,
+			struct bcmgenet_priv, napi);
+	unsigned int work_done;
+
+	/* tx reclaim */
+	bcmgenet_tx_reclaim(priv->dev, &priv->tx_rings[DESC_INDEX]);
+
+	work_done = bcmgenet_desc_rx(priv, budget);
+
+	/* Advancing our consumer index */
+	priv->rx_c_index += work_done;
+	priv->rx_c_index &= DMA_C_INDEX_MASK;
+	bcmgenet_rdma_ring_writel(priv, DESC_INDEX,
+				priv->rx_c_index, RDMA_CONS_INDEX);
+	if (work_done < budget) {
+		napi_complete(napi);
+		/* re-enable RX interrupts now that the ring is drained */
+		bcmgenet_intrl2_0_writel(priv,
+			UMAC_IRQ_RXDMA_BDONE, INTRL2_CPU_MASK_CLEAR);
+	}
+
+	return work_done;
+}
+
+/* Interrupt bottom half */
+static void bcmgenet_irq_task(struct work_struct *work)
+{
+	struct bcmgenet_priv *priv = container_of(
+			work, struct bcmgenet_priv, bcmgenet_irq_work);
+
+	netif_dbg(priv, intr, priv->dev, "%s\n", __func__);
+
+	/* Link UP/DOWN event */
+	if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) &&
+		(priv->irq0_stat & (UMAC_IRQ_LINK_UP|UMAC_IRQ_LINK_DOWN))) {
+		phy_mac_interrupt(priv->phydev,
+			priv->irq0_stat & UMAC_IRQ_LINK_UP);
+		priv->irq0_stat &= ~(UMAC_IRQ_LINK_UP|UMAC_IRQ_LINK_DOWN);
+	}
+}
+
+/* bcmgenet_isr1: interrupt handler for ring buffer. */
+static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
+{
+	struct bcmgenet_priv *priv = dev_id;
+	unsigned int index;
+
+	/* Save irq status for bottom-half processing. */
+	priv->irq1_stat =
+		bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) &
+		~priv->int1_mask;
+	/* clear interrupts */
+	bcmgenet_intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR);
+
+	netif_dbg(priv, intr, priv->dev,
+		"%s: IRQ=0x%x\n", __func__, priv->irq1_stat);
+	/* Check the MBDONE interrupts.
+	 * packet is done, reclaim descriptors
+	 */
+	if (priv->irq1_stat & 0x0000ffff) {
+		for (index = 0; index < 16; index++) {
+			if (priv->irq1_stat & (1 << index))
+				bcmgenet_tx_reclaim(priv->dev,
+						&priv->tx_rings[index]);
+		}
+	}
+	return IRQ_HANDLED;
+}
+
+/* bcmgenet_isr0: Handle various interrupts. */
+static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
+{
+	struct bcmgenet_priv *priv = dev_id;
+
+	/* Save irq status for bottom-half processing. */
+	priv->irq0_stat =
+		bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT) &
+		~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
+	/* clear interrupts */
+	bcmgenet_intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);
+
+	netif_dbg(priv, intr, priv->dev,
+		"IRQ=0x%x\n", priv->irq0_stat);
+
+	if (priv->irq0_stat & (UMAC_IRQ_RXDMA_BDONE | UMAC_IRQ_RXDMA_PDONE)) {
+		/* We use NAPI (software interrupt throttling) if
+		 * Rx descriptor throttling is not used.
+		 * Disable the interrupt here; it is re-enabled in the
+		 * poll method.
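+		 * (bcmgenet_poll() unmasks UMAC_IRQ_RXDMA_BDONE again
+		 * once work_done < budget.)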
+ */ + if (likely(napi_schedule_prep(&priv->napi))) { + bcmgenet_intrl2_0_writel(priv, + UMAC_IRQ_RXDMA_BDONE, INTRL2_CPU_MASK_SET); + __napi_schedule(&priv->napi); + } + } + if (priv->irq0_stat & + (UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE)) { + /* Tx reclaim */ + bcmgenet_tx_reclaim(priv->dev, &priv->tx_rings[DESC_INDEX]); + } + if (priv->irq0_stat & (UMAC_IRQ_PHY_DET_R | + UMAC_IRQ_PHY_DET_F | + UMAC_IRQ_LINK_UP | + UMAC_IRQ_LINK_DOWN | + UMAC_IRQ_HFB_SM | + UMAC_IRQ_HFB_MM | + UMAC_IRQ_MPD_R)) { + /* all other interested interrupts handled in bottom half */ + schedule_work(&priv->bcmgenet_irq_work); + } + + if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) && + priv->irq0_stat & (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR)) { + priv->irq0_stat &= ~(UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR); + wake_up(&priv->wq); + } + + return IRQ_HANDLED; +} + +static void bcmgenet_umac_reset(struct bcmgenet_priv *priv) +{ + u32 reg; + + reg = bcmgenet_rbuf_ctrl_get(priv); + reg |= BIT(1); + bcmgenet_rbuf_ctrl_set(priv, reg); + udelay(10); + + reg &= ~BIT(1); + bcmgenet_rbuf_ctrl_set(priv, reg); + udelay(10); +} + +static void bcmgenet_set_hw_addr(struct bcmgenet_priv *priv, + unsigned char *addr) +{ + bcmgenet_umac_writel(priv, (addr[0] << 24) | (addr[1] << 16) | + (addr[2] << 8) | addr[3], UMAC_MAC0); + bcmgenet_umac_writel(priv, (addr[4] << 8) | addr[5], UMAC_MAC1); +} + +static int bcmgenet_wol_resume(struct bcmgenet_priv *priv) +{ + int ret; + + /* From WOL-enabled suspend, switch to regular clock */ + clk_disable(priv->clk_wol); + /* init umac registers to synchronize s/w with h/w */ + ret = init_umac(priv); + if (ret) + return ret; + + phy_init_hw(priv->phydev); + /* Speed settings must be restored */ + bcmgenet_mii_config(priv->dev); + + return 0; +} + +/* Returns a reusable dma control register value */ +static u32 bcmgenet_dma_disable(struct bcmgenet_priv *priv) +{ + u32 reg; + u32 dma_ctrl; + + /* disable DMA */ + dma_ctrl = 1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT) | DMA_EN; + reg = bcmgenet_tdma_readl(priv, DMA_CTRL); + reg &= ~dma_ctrl; + bcmgenet_tdma_writel(priv, reg, DMA_CTRL); + + reg = bcmgenet_rdma_readl(priv, DMA_CTRL); + reg &= ~dma_ctrl; + bcmgenet_rdma_writel(priv, reg, DMA_CTRL); + + bcmgenet_umac_writel(priv, 1, UMAC_TX_FLUSH); + udelay(10); + bcmgenet_umac_writel(priv, 0, UMAC_TX_FLUSH); + + return dma_ctrl; +} + +static void bcmgenet_enable_dma(struct bcmgenet_priv *priv, u32 dma_ctrl) +{ + u32 reg; + + reg = bcmgenet_rdma_readl(priv, DMA_CTRL); + reg |= dma_ctrl; + bcmgenet_rdma_writel(priv, reg, DMA_CTRL); + + reg = bcmgenet_tdma_readl(priv, DMA_CTRL); + reg |= dma_ctrl; + bcmgenet_tdma_writel(priv, reg, DMA_CTRL); +} + +static int bcmgenet_open(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + unsigned long dma_ctrl; + u32 reg; + int ret; + + netif_dbg(priv, ifup, dev, "bcmgenet_open\n"); + + /* Turn on the clock */ + if (!IS_ERR(priv->clk)) + clk_prepare_enable(priv->clk); + + /* take MAC out of reset */ + bcmgenet_umac_reset(priv); + + ret = init_umac(priv); + if (ret) + goto err_clk_disable; + + /* disable ethernet MAC while updating its registers */ + reg = bcmgenet_umac_readl(priv, UMAC_CMD); + reg &= ~(CMD_TX_EN | CMD_RX_EN); + bcmgenet_umac_writel(priv, reg, UMAC_CMD); + + bcmgenet_set_hw_addr(priv, dev->dev_addr); + + if (priv->wol_enabled) { + ret = bcmgenet_wol_resume(priv); + if (ret) + return ret; + } + + if (phy_is_internal(priv->phydev)) { + reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT); + reg |= EXT_ENERGY_DET_MASK; + 
bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT); + } + + /* Disable RX/TX DMA and flush TX queues */ + dma_ctrl = bcmgenet_dma_disable(priv); + + /* Reinitialize TDMA and RDMA and SW housekeeping */ + ret = bcmgenet_init_dma(priv); + if (ret) { + netdev_err(dev, "failed to initialize DMA\n"); + goto err_fini_dma; + } + + /* Always enable ring 16 - descriptor ring */ + bcmgenet_enable_dma(priv, dma_ctrl); + + ret = request_irq(priv->irq0, bcmgenet_isr0, IRQF_SHARED, + dev->name, priv); + if (ret < 0) { + netdev_err(dev, "can't request IRQ %d\n", priv->irq0); + goto err_fini_dma; + } + + ret = request_irq(priv->irq1, bcmgenet_isr1, IRQF_SHARED, + dev->name, priv); + if (ret < 0) { + netdev_err(dev, "can't request IRQ %d\n", priv->irq1); + goto err_irq0; + } + + /* Start the network engine */ + napi_enable(&priv->napi); + + reg = bcmgenet_umac_readl(priv, UMAC_CMD); + reg |= (CMD_TX_EN | CMD_RX_EN); + bcmgenet_umac_writel(priv, reg, UMAC_CMD); + + /* Make sure we reflect the value of CRC_CMD_FWD */ + priv->crc_fwd_en = !!(reg & CMD_CRC_FWD); + + device_set_wakeup_capable(&dev->dev, 1); + + if (phy_is_internal(priv->phydev)) + bcmgenet_power_up(priv, GENET_POWER_PASSIVE); + + netif_tx_start_all_queues(dev); + + phy_start(priv->phydev); + + return 0; + +err_irq0: + free_irq(priv->irq0, dev); +err_fini_dma: + bcmgenet_fini_dma(priv); +err_clk_disable: + if (!IS_ERR(priv->clk)) + clk_disable_unprepare(priv->clk); + return ret; +} + +static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv) +{ + int ret = 0; + int timeout = 0; + u32 reg; + + /* Disable TDMA to stop add more frames in TX DMA */ + reg = bcmgenet_tdma_readl(priv, DMA_CTRL); + reg &= ~DMA_EN; + bcmgenet_tdma_writel(priv, reg, DMA_CTRL); + + /* Check TDMA status register to confirm TDMA is disabled */ + while (timeout++ < DMA_TIMEOUT_VAL) { + reg = bcmgenet_tdma_readl(priv, DMA_STATUS); + if (reg & DMA_DISABLED) + break; + + udelay(1); + } + + if (timeout == DMA_TIMEOUT_VAL) { + netdev_warn(priv->dev, + "Timed out while disabling TX DMA\n"); + ret = -ETIMEDOUT; + } + + /* Wait 10ms for packet drain in both tx and rx dma */ + usleep_range(10000, 20000); + + /* Disable RDMA */ + reg = bcmgenet_rdma_readl(priv, DMA_CTRL); + reg &= ~DMA_EN; + bcmgenet_rdma_writel(priv, reg, DMA_CTRL); + + timeout = 0; + /* Check RDMA status register to confirm RDMA is disabled */ + while (timeout++ < DMA_TIMEOUT_VAL) { + reg = bcmgenet_rdma_readl(priv, DMA_STATUS); + if (reg & DMA_DISABLED) + break; + + udelay(1); + } + + if (timeout == DMA_TIMEOUT_VAL) { + netdev_warn(priv->dev, + "Timed out while disabling RX DMA\n"); + ret = -ETIMEDOUT; + } + + return ret; +} + +static int bcmgenet_close(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + int ret; + u32 reg; + + netif_dbg(priv, ifdown, dev, "bcmgenet_close\n"); + + phy_stop(priv->phydev); + + /* Disable MAC receive */ + reg = bcmgenet_umac_readl(priv, UMAC_CMD); + reg &= ~CMD_RX_EN; + bcmgenet_umac_writel(priv, reg, UMAC_CMD); + + netif_tx_stop_all_queues(dev); + + ret = bcmgenet_dma_teardown(priv); + if (ret) + return ret; + + /* Disable MAC transmit. TX DMA disabled have to done before this */ + reg = bcmgenet_umac_readl(priv, UMAC_CMD); + reg &= ~CMD_TX_EN; + bcmgenet_umac_writel(priv, reg, UMAC_CMD); + + napi_disable(&priv->napi); + + /* tx reclaim */ + bcmgenet_tx_reclaim_all(dev); + bcmgenet_fini_dma(priv); + + free_irq(priv->irq0, priv); + free_irq(priv->irq1, priv); + + /* Wait for pending work items to complete - we are stopping + * the clock now. 
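The MAC and both DMA engines were already stopped above.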
Since interrupts are disabled, no new work + * will be scheduled. + */ + cancel_work_sync(&priv->bcmgenet_irq_work); + + if (phy_is_internal(priv->phydev)) + bcmgenet_power_down(priv, GENET_POWER_PASSIVE); + + if (priv->wol_enabled) + clk_enable(priv->clk_wol); + + if (!IS_ERR(priv->clk)) + clk_disable_unprepare(priv->clk); + + return 0; +} + +static void bcmgenet_timeout(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + + netif_dbg(priv, tx_err, dev, "bcmgenet_timeout\n"); + + dev->trans_start = jiffies; + + dev->stats.tx_errors++; + + netif_tx_wake_all_queues(dev); +} + +#define MAX_MC_COUNT 16 + +static inline void bcmgenet_set_mdf_addr(struct bcmgenet_priv *priv, + unsigned char *addr, + int *i, + int *mc) +{ + u32 reg; + + bcmgenet_umac_writel(priv, + addr[0] << 8 | addr[1], UMAC_MDF_ADDR + (*i * 4)); + bcmgenet_umac_writel(priv, + addr[2] << 24 | addr[3] << 16 | + addr[4] << 8 | addr[5], + UMAC_MDF_ADDR + ((*i + 1) * 4)); + reg = bcmgenet_umac_readl(priv, UMAC_MDF_CTRL); + reg |= (1 << (MAX_MC_COUNT - *mc)); + bcmgenet_umac_writel(priv, reg, UMAC_MDF_CTRL); + *i += 2; + (*mc)++; +} + +static void bcmgenet_set_rx_mode(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + struct netdev_hw_addr *ha; + int i, mc; + u32 reg; + + netif_dbg(priv, hw, dev, "%s: %08X\n", __func__, dev->flags); + + /* Promiscous mode */ + reg = bcmgenet_umac_readl(priv, UMAC_CMD); + if (dev->flags & IFF_PROMISC) { + reg |= CMD_PROMISC; + bcmgenet_umac_writel(priv, reg, UMAC_CMD); + bcmgenet_umac_writel(priv, 0, UMAC_MDF_CTRL); + return; + } else { + reg &= ~CMD_PROMISC; + bcmgenet_umac_writel(priv, reg, UMAC_CMD); + } + + /* UniMac doesn't support ALLMULTI */ + if (dev->flags & IFF_ALLMULTI) { + netdev_warn(dev, "ALLMULTI is not supported\n"); + return; + } + + /* update MDF filter */ + i = 0; + mc = 0; + /* Broadcast */ + bcmgenet_set_mdf_addr(priv, dev->broadcast, &i, &mc); + /* my own address.*/ + bcmgenet_set_mdf_addr(priv, dev->dev_addr, &i, &mc); + /* Unicast list*/ + if (netdev_uc_count(dev) > (MAX_MC_COUNT - mc)) + return; + + if (!netdev_uc_empty(dev)) + netdev_for_each_uc_addr(ha, dev) + bcmgenet_set_mdf_addr(priv, ha->addr, &i, &mc); + /* Multicast */ + if (netdev_mc_empty(dev) || netdev_mc_count(dev) >= (MAX_MC_COUNT - mc)) + return; + + netdev_for_each_mc_addr(ha, dev) + bcmgenet_set_mdf_addr(priv, ha->addr, &i, &mc); +} + +/* Set the hardware MAC address. */ +static int bcmgenet_set_mac_addr(struct net_device *dev, void *p) +{ + struct sockaddr *addr = p; + + /* Setting the MAC address at the hardware level is not possible + * without disabling the UniMAC RX/TX enable bits. 
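+	 * The new address is therefore only cached in dev->dev_addr here;
+	 * it is programmed into the hardware by bcmgenet_set_hw_addr() on
+	 * the next bcmgenet_open().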
+ */ + if (netif_running(dev)) + return -EBUSY; + + ether_addr_copy(dev->dev_addr, addr->sa_data); + + return 0; +} + +static const struct net_device_ops bcmgenet_netdev_ops = { + .ndo_open = bcmgenet_open, + .ndo_stop = bcmgenet_close, + .ndo_start_xmit = bcmgenet_xmit, + .ndo_tx_timeout = bcmgenet_timeout, + .ndo_set_rx_mode = bcmgenet_set_rx_mode, + .ndo_set_mac_address = bcmgenet_set_mac_addr, + .ndo_do_ioctl = bcmgenet_ioctl, + .ndo_set_features = bcmgenet_set_features, +}; + +/* Array of GENET hardware parameters/characteristics */ +static struct bcmgenet_hw_params bcmgenet_hw_params[] = { + [GENET_V1] = { + .tx_queues = 0, + .rx_queues = 0, + .bds_cnt = 0, + .bp_in_en_shift = 16, + .bp_in_mask = 0xffff, + .hfb_filter_cnt = 16, + .qtag_mask = 0x1F, + .hfb_offset = 0x1000, + .rdma_offset = 0x2000, + .tdma_offset = 0x3000, + .words_per_bd = 2, + }, + [GENET_V2] = { + .tx_queues = 4, + .rx_queues = 4, + .bds_cnt = 32, + .bp_in_en_shift = 16, + .bp_in_mask = 0xffff, + .hfb_filter_cnt = 16, + .qtag_mask = 0x1F, + .tbuf_offset = 0x0600, + .hfb_offset = 0x1000, + .hfb_reg_offset = 0x2000, + .rdma_offset = 0x3000, + .tdma_offset = 0x4000, + .words_per_bd = 2, + .flags = GENET_HAS_EXT, + }, + [GENET_V3] = { + .tx_queues = 4, + .rx_queues = 4, + .bds_cnt = 32, + .bp_in_en_shift = 17, + .bp_in_mask = 0x1ffff, + .hfb_filter_cnt = 48, + .qtag_mask = 0x3F, + .tbuf_offset = 0x0600, + .hfb_offset = 0x8000, + .hfb_reg_offset = 0xfc00, + .rdma_offset = 0x10000, + .tdma_offset = 0x11000, + .words_per_bd = 2, + .flags = GENET_HAS_EXT | GENET_HAS_MDIO_INTR, + }, + [GENET_V4] = { + .tx_queues = 4, + .rx_queues = 4, + .bds_cnt = 32, + .bp_in_en_shift = 17, + .bp_in_mask = 0x1ffff, + .hfb_filter_cnt = 48, + .qtag_mask = 0x3F, + .tbuf_offset = 0x0600, + .hfb_offset = 0x8000, + .hfb_reg_offset = 0xfc00, + .rdma_offset = 0x2000, + .tdma_offset = 0x4000, + .words_per_bd = 3, + .flags = GENET_HAS_40BITS | GENET_HAS_EXT | GENET_HAS_MDIO_INTR, + }, +}; + +/* Infer hardware parameters from the detected GENET version */ +static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv) +{ + struct bcmgenet_hw_params *params; + u32 reg; + u8 major; + + if (GENET_IS_V4(priv)) { + bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus; + genet_dma_ring_regs = genet_dma_ring_regs_v4; + priv->dma_rx_chk_bit = DMA_RX_CHK_V3PLUS; + priv->version = GENET_V4; + } else if (GENET_IS_V3(priv)) { + bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus; + genet_dma_ring_regs = genet_dma_ring_regs_v123; + priv->dma_rx_chk_bit = DMA_RX_CHK_V3PLUS; + priv->version = GENET_V3; + } else if (GENET_IS_V2(priv)) { + bcmgenet_dma_regs = bcmgenet_dma_regs_v2; + genet_dma_ring_regs = genet_dma_ring_regs_v123; + priv->dma_rx_chk_bit = DMA_RX_CHK_V12; + priv->version = GENET_V2; + } else if (GENET_IS_V1(priv)) { + bcmgenet_dma_regs = bcmgenet_dma_regs_v1; + genet_dma_ring_regs = genet_dma_ring_regs_v123; + priv->dma_rx_chk_bit = DMA_RX_CHK_V12; + priv->version = GENET_V1; + } + + /* enum genet_version starts at 1 */ + priv->hw_params = &bcmgenet_hw_params[priv->version]; + params = priv->hw_params; + + /* Read GENET HW version */ + reg = bcmgenet_sys_readl(priv, SYS_REV_CTRL); + major = (reg >> 24 & 0x0f); + if (major == 5) + major = 4; + else if (major == 0) + major = 1; + if (major != priv->version) { + dev_err(&priv->pdev->dev, + "GENET version mismatch, got: %d, configured for: %d\n", + major, priv->version); + } + + /* Print the GENET core version */ + dev_info(&priv->pdev->dev, "GENET " GENET_VER_FMT, + major, (reg >> 16) & 0x0f, reg & 0xffff); + +#ifdef 
CONFIG_PHYS_ADDR_T_64BIT + if (!(params->flags & GENET_HAS_40BITS)) + pr_warn("GENET does not support 40-bits PA\n"); +#endif + + pr_debug("Configuration for version: %d\n" + "TXq: %1d, RXq: %1d, BDs: %1d\n" + "BP << en: %2d, BP msk: 0x%05x\n" + "HFB count: %2d, QTAQ msk: 0x%05x\n" + "TBUF: 0x%04x, HFB: 0x%04x, HFBreg: 0x%04x\n" + "RDMA: 0x%05x, TDMA: 0x%05x\n" + "Words/BD: %d\n", + priv->version, + params->tx_queues, params->rx_queues, params->bds_cnt, + params->bp_in_en_shift, params->bp_in_mask, + params->hfb_filter_cnt, params->qtag_mask, + params->tbuf_offset, params->hfb_offset, + params->hfb_reg_offset, + params->rdma_offset, params->tdma_offset, + params->words_per_bd); +} + +static const struct of_device_id bcmgenet_match[] = { + { .compatible = "brcm,genet-v1", .data = (void *)GENET_V1 }, + { .compatible = "brcm,genet-v2", .data = (void *)GENET_V2 }, + { .compatible = "brcm,genet-v3", .data = (void *)GENET_V3 }, + { .compatible = "brcm,genet-v4", .data = (void *)GENET_V4 }, + { }, +}; + +static int bcmgenet_probe(struct platform_device *pdev) +{ + struct device_node *dn = pdev->dev.of_node; + const struct of_device_id *of_id; + struct bcmgenet_priv *priv; + struct net_device *dev; + const void *macaddr; + struct resource *r; + int err = -EIO; + + /* Up to GENET_MAX_MQ_CNT + 1 TX queues and a single RX queue */ + dev = alloc_etherdev_mqs(sizeof(*priv), GENET_MAX_MQ_CNT + 1, 1); + if (!dev) { + dev_err(&pdev->dev, "can't allocate net device\n"); + return -ENOMEM; + } + + of_id = of_match_node(bcmgenet_match, dn); + if (!of_id) + return -EINVAL; + + priv = netdev_priv(dev); + priv->irq0 = platform_get_irq(pdev, 0); + priv->irq1 = platform_get_irq(pdev, 1); + if (!priv->irq0 || !priv->irq1) { + dev_err(&pdev->dev, "can't find IRQs\n"); + err = -EINVAL; + goto err; + } + + macaddr = of_get_mac_address(dn); + if (!macaddr) { + dev_err(&pdev->dev, "can't find MAC address\n"); + err = -EINVAL; + goto err; + } + + r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + priv->base = devm_ioremap_resource(&pdev->dev, r); + if (IS_ERR(priv->base)) { + err = PTR_ERR(priv->base); + goto err; + } + + SET_NETDEV_DEV(dev, &pdev->dev); + dev_set_drvdata(&pdev->dev, dev); + ether_addr_copy(dev->dev_addr, macaddr); + dev->watchdog_timeo = 2 * HZ; + SET_ETHTOOL_OPS(dev, &bcmgenet_ethtool_ops); + dev->netdev_ops = &bcmgenet_netdev_ops; + netif_napi_add(dev, &priv->napi, bcmgenet_poll, 64); + + priv->msg_enable = netif_msg_init(-1, GENET_MSG_DEFAULT); + + /* Set hardware features */ + dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM | + NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM; + + /* Set the needed headroom to account for any possible + * features enabling/disabling at runtime + */ + dev->needed_headroom += 64; + + netdev_boot_setup_check(dev); + + priv->dev = dev; + priv->pdev = pdev; + priv->version = (enum bcmgenet_version)of_id->data; + + bcmgenet_set_hw_params(priv); + + /* Mii wait queue */ + init_waitqueue_head(&priv->wq); + /* Always use RX_BUF_LENGTH (2KB) buffer for all chips */ + priv->rx_buf_len = RX_BUF_LENGTH; + INIT_WORK(&priv->bcmgenet_irq_work, bcmgenet_irq_task); + + priv->clk = devm_clk_get(&priv->pdev->dev, "enet"); + if (IS_ERR(priv->clk)) + dev_warn(&priv->pdev->dev, "failed to get enet clock\n"); + + priv->clk_wol = devm_clk_get(&priv->pdev->dev, "enet-wol"); + if (IS_ERR(priv->clk_wol)) + dev_warn(&priv->pdev->dev, "failed to get enet-wol clock\n"); + + if (!IS_ERR(priv->clk)) + clk_prepare_enable(priv->clk); + + err = reset_umac(priv); + if (err) + goto err_clk_disable; + + err = 
bcmgenet_mii_init(dev); + if (err) + goto err_clk_disable; + + /* setup number of real queues + 1 (GENET_V1 has 0 hardware queues + * just the ring 16 descriptor based TX + */ + netif_set_real_num_tx_queues(priv->dev, priv->hw_params->tx_queues + 1); + netif_set_real_num_rx_queues(priv->dev, priv->hw_params->rx_queues + 1); + + err = register_netdev(dev); + if (err) + goto err_clk_disable; + + /* Turn off the main clock, WOL clock is handled separately */ + if (!IS_ERR(priv->clk)) + clk_disable_unprepare(priv->clk); + + return err; + +err_clk_disable: + if (!IS_ERR(priv->clk)) + clk_disable_unprepare(priv->clk); +err: + free_netdev(dev); + return err; +} + +static int bcmgenet_remove(struct platform_device *pdev) +{ + struct bcmgenet_priv *priv = dev_to_priv(&pdev->dev); + + dev_set_drvdata(&pdev->dev, NULL); + unregister_netdev(priv->dev); + bcmgenet_mii_exit(priv->dev); + free_netdev(priv->dev); + + return 0; +} + + +static struct platform_driver bcmgenet_driver = { + .probe = bcmgenet_probe, + .remove = bcmgenet_remove, + .driver = { + .name = "bcmgenet", + .owner = THIS_MODULE, + .of_match_table = bcmgenet_match, + }, +}; +module_platform_driver(bcmgenet_driver); + +MODULE_AUTHOR("Broadcom Corporation"); +MODULE_DESCRIPTION("Broadcom GENET Ethernet controller driver"); +MODULE_ALIAS("platform:bcmgenet"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h new file mode 100644 index 000000000000..0f117105fed1 --- /dev/null +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h @@ -0,0 +1,628 @@ +/* + * Copyright (c) 2014 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + * +*/ +#ifndef __BCMGENET_H__ +#define __BCMGENET_H__ + +#include <linux/skbuff.h> +#include <linux/netdevice.h> +#include <linux/spinlock.h> +#include <linux/clk.h> +#include <linux/mii.h> +#include <linux/if_vlan.h> +#include <linux/phy.h> + +/* total number of Buffer Descriptors, same for Rx/Tx */ +#define TOTAL_DESC 256 + +/* which ring is descriptor based */ +#define DESC_INDEX 16 + +/* Body(1500) + EH_SIZE(14) + VLANTAG(4) + BRCMTAG(6) + FCS(4) = 1528. + * 1536 is multiple of 256 bytes + */ +#define ENET_BRCM_TAG_LEN 6 +#define ENET_PAD 8 +#define ENET_MAX_MTU_SIZE (ETH_DATA_LEN + ETH_HLEN + VLAN_HLEN + \ + ENET_BRCM_TAG_LEN + ETH_FCS_LEN + ENET_PAD) +#define DMA_MAX_BURST_LENGTH 0x10 + +/* misc. configuration */ +#define CLEAR_ALL_HFB 0xFF +#define DMA_FC_THRESH_HI (TOTAL_DESC >> 4) +#define DMA_FC_THRESH_LO 5 + +/* 64B receive/transmit status block */ +struct status_64 { + u32 length_status; /* length and peripheral status */ + u32 ext_status; /* Extended status*/ + u32 rx_csum; /* partial rx checksum */ + u32 unused1[9]; /* unused */ + u32 tx_csum_info; /* Tx checksum info. 
*/ + u32 unused2[3]; /* unused */ +}; + +/* Rx status bits */ +#define STATUS_RX_EXT_MASK 0x1FFFFF +#define STATUS_RX_CSUM_MASK 0xFFFF +#define STATUS_RX_CSUM_OK 0x10000 +#define STATUS_RX_CSUM_FR 0x20000 +#define STATUS_RX_PROTO_TCP 0 +#define STATUS_RX_PROTO_UDP 1 +#define STATUS_RX_PROTO_ICMP 2 +#define STATUS_RX_PROTO_OTHER 3 +#define STATUS_RX_PROTO_MASK 3 +#define STATUS_RX_PROTO_SHIFT 18 +#define STATUS_FILTER_INDEX_MASK 0xFFFF +/* Tx status bits */ +#define STATUS_TX_CSUM_START_MASK 0x7FFF +#define STATUS_TX_CSUM_START_SHIFT 16 +#define STATUS_TX_CSUM_PROTO_UDP 0x8000 +#define STATUS_TX_CSUM_OFFSET_MASK 0x7FFF +#define STATUS_TX_CSUM_LV 0x80000000 + +/* DMA Descriptor */ +#define DMA_DESC_LENGTH_STATUS 0x00 /* in bytes of data in buffer */ +#define DMA_DESC_ADDRESS_LO 0x04 /* lower bits of PA */ +#define DMA_DESC_ADDRESS_HI 0x08 /* upper 32 bits of PA, GENETv4+ */ + +/* Rx/Tx common counter group */ +struct bcmgenet_pkt_counters { + u32 cnt_64; /* RO Received/Transmitted 64 bytes packet */ + u32 cnt_127; /* RO Rx/Tx 127 bytes packet */ + u32 cnt_255; /* RO Rx/Tx 65-255 bytes packet */ + u32 cnt_511; /* RO Rx/Tx 256-511 bytes packet */ + u32 cnt_1023; /* RO Rx/Tx 512-1023 bytes packet */ + u32 cnt_1518; /* RO Rx/Tx 1024-1518 bytes packet */ + u32 cnt_mgv; /* RO Rx/Tx 1519-1522 good VLAN packet */ + u32 cnt_2047; /* RO Rx/Tx 1522-2047 bytes packet */ + u32 cnt_4095; /* RO Rx/Tx 2048-4095 bytes packet */ + u32 cnt_9216; /* RO Rx/Tx 4096-9216 bytes packet */ +}; + +/* RSV, Receive Status Vector */ +struct bcmgenet_rx_counters { + struct bcmgenet_pkt_counters pkt_cnt; + u32 pkt; /* RO (0x428) Received pkt count */ + u32 bytes; /* RO Received byte count */ + u32 mca; /* RO # of Received multicast pkt */ + u32 bca; /* RO # of Received broadcast pkt */ + u32 fcs; /* RO # of Received FCS error */ + u32 cf; /* RO # of Received control frame pkt */ + u32 pf; /* RO # of Received pause frame pkt */ + u32 uo; /* RO # of unknown op code pkt */ + u32 aln; /* RO # of alignment error count */ + u32 flr; /* RO # of frame length out of range count */ + u32 cde; /* RO # of code error pkt */ + u32 fcr; /* RO # of carrier sense error pkt */ + u32 ovr; /* RO # of oversize pkt */ + u32 jbr; /* RO # of jabber count */ + u32 mtue; /* RO # of MTU error pkt */ + u32 pok; /* RO # of Received good pkt */ + u32 uc; /* RO # of unicast pkt */ + u32 ppp; /* RO # of PPP pkt */ + u32 rcrc; /* RO (0x470) # of CRC match pkt */ +}; + +/* TSV, Transmit Status Vector */ +struct bcmgenet_tx_counters { + struct bcmgenet_pkt_counters pkt_cnt; + u32 pkts; /* RO (0x4a8) Transmitted pkt */ + u32 mca; /* RO # of xmitted multicast pkt */ + u32 bca; /* RO # of xmitted broadcast pkt */ + u32 pf; /* RO # of xmitted pause frame count */ + u32 cf; /* RO # of xmitted control frame count */ + u32 fcs; /* RO # of xmitted FCS error count */ + u32 ovr; /* RO # of xmitted oversize pkt */ + u32 drf; /* RO # of xmitted deferral pkt */ + u32 edf; /* RO # of xmitted excessive deferral pkt */ + u32 scl; /* RO # of xmitted single collision pkt */ + u32 mcl; /* RO # of xmitted multiple collision pkt */ + u32 lcl; /* RO # of xmitted late collision pkt */ + u32 ecl; /* RO # of xmitted excessive collision pkt */ + u32 frg; /* RO # of xmitted fragments pkt */ + u32 ncl; /* RO # of xmitted total collision count */ + u32 jbr; /* RO # of xmitted jabber count */ + u32 bytes; /* RO # of xmitted byte count */ + u32 pok; /* RO # of xmitted good pkt */ + u32 uc; /* RO (0x4f0) # of xmitted unicast pkt */ +}; + +struct bcmgenet_mib_counters { + struct bcmgenet_rx_counters rx; + struct
bcmgenet_tx_counters tx; + u32 rx_runt_cnt; + u32 rx_runt_fcs; + u32 rx_runt_fcs_align; + u32 rx_runt_bytes; + u32 rbuf_ovflow_cnt; + u32 rbuf_err_cnt; + u32 mdf_err_cnt; +}; + +#define UMAC_HD_BKP_CTRL 0x004 +#define HD_FC_EN (1 << 0) +#define HD_FC_BKOFF_OK (1 << 1) +#define IPG_CONFIG_RX_SHIFT 2 +#define IPG_CONFIG_RX_MASK 0x1F + +#define UMAC_CMD 0x008 +#define CMD_TX_EN (1 << 0) +#define CMD_RX_EN (1 << 1) +#define UMAC_SPEED_10 0 +#define UMAC_SPEED_100 1 +#define UMAC_SPEED_1000 2 +#define UMAC_SPEED_2500 3 +#define CMD_SPEED_SHIFT 2 +#define CMD_SPEED_MASK 3 +#define CMD_PROMISC (1 << 4) +#define CMD_PAD_EN (1 << 5) +#define CMD_CRC_FWD (1 << 6) +#define CMD_PAUSE_FWD (1 << 7) +#define CMD_RX_PAUSE_IGNORE (1 << 8) +#define CMD_TX_ADDR_INS (1 << 9) +#define CMD_HD_EN (1 << 10) +#define CMD_SW_RESET (1 << 13) +#define CMD_LCL_LOOP_EN (1 << 15) +#define CMD_AUTO_CONFIG (1 << 22) +#define CMD_CNTL_FRM_EN (1 << 23) +#define CMD_NO_LEN_CHK (1 << 24) +#define CMD_RMT_LOOP_EN (1 << 25) +#define CMD_PRBL_EN (1 << 27) +#define CMD_TX_PAUSE_IGNORE (1 << 28) +#define CMD_TX_RX_EN (1 << 29) +#define CMD_RUNT_FILTER_DIS (1 << 30) + +#define UMAC_MAC0 0x00C +#define UMAC_MAC1 0x010 +#define UMAC_MAX_FRAME_LEN 0x014 + +#define UMAC_TX_FLUSH 0x334 + +#define UMAC_MIB_START 0x400 + +#define UMAC_MDIO_CMD 0x614 +#define MDIO_START_BUSY (1 << 29) +#define MDIO_READ_FAIL (1 << 28) +#define MDIO_RD (2 << 26) +#define MDIO_WR (1 << 26) +#define MDIO_PMD_SHIFT 21 +#define MDIO_PMD_MASK 0x1F +#define MDIO_REG_SHIFT 16 +#define MDIO_REG_MASK 0x1F + +#define UMAC_RBUF_OVFL_CNT 0x61C + +#define UMAC_MPD_CTRL 0x620 +#define MPD_EN (1 << 0) +#define MPD_PW_EN (1 << 27) +#define MPD_MSEQ_LEN_SHIFT 16 +#define MPD_MSEQ_LEN_MASK 0xFF + +#define UMAC_MPD_PW_MS 0x624 +#define UMAC_MPD_PW_LS 0x628 +#define UMAC_RBUF_ERR_CNT 0x634 +#define UMAC_MDF_ERR_CNT 0x638 +#define UMAC_MDF_CTRL 0x650 +#define UMAC_MDF_ADDR 0x654 +#define UMAC_MIB_CTRL 0x580 +#define MIB_RESET_RX (1 << 0) +#define MIB_RESET_RUNT (1 << 1) +#define MIB_RESET_TX (1 << 2) + +#define RBUF_CTRL 0x00 +#define RBUF_64B_EN (1 << 0) +#define RBUF_ALIGN_2B (1 << 1) +#define RBUF_BAD_DIS (1 << 2) + +#define RBUF_STATUS 0x0C +#define RBUF_STATUS_WOL (1 << 0) +#define RBUF_STATUS_MPD_INTR_ACTIVE (1 << 1) +#define RBUF_STATUS_ACPI_INTR_ACTIVE (1 << 2) + +#define RBUF_CHK_CTRL 0x14 +#define RBUF_RXCHK_EN (1 << 0) +#define RBUF_SKIP_FCS (1 << 4) + +#define RBUF_TBUF_SIZE_CTRL 0xb4 + +#define RBUF_HFB_CTRL_V1 0x38 +#define RBUF_HFB_FILTER_EN_SHIFT 16 +#define RBUF_HFB_FILTER_EN_MASK 0xffff0000 +#define RBUF_HFB_EN (1 << 0) +#define RBUF_HFB_256B (1 << 1) +#define RBUF_ACPI_EN (1 << 2) + +#define RBUF_HFB_LEN_V1 0x3C +#define RBUF_FLTR_LEN_MASK 0xFF +#define RBUF_FLTR_LEN_SHIFT 8 + +#define TBUF_CTRL 0x00 +#define TBUF_BP_MC 0x0C + +#define TBUF_CTRL_V1 0x80 +#define TBUF_BP_MC_V1 0xA0 + +#define HFB_CTRL 0x00 +#define HFB_FLT_ENABLE_V3PLUS 0x04 +#define HFB_FLT_LEN_V2 0x04 +#define HFB_FLT_LEN_V3PLUS 0x1C + +/* uniMac intrl2 registers */ +#define INTRL2_CPU_STAT 0x00 +#define INTRL2_CPU_SET 0x04 +#define INTRL2_CPU_CLEAR 0x08 +#define INTRL2_CPU_MASK_STATUS 0x0C +#define INTRL2_CPU_MASK_SET 0x10 +#define INTRL2_CPU_MASK_CLEAR 0x14 + +/* INTRL2 instance 0 definitions */ +#define UMAC_IRQ_SCB (1 << 0) +#define UMAC_IRQ_EPHY (1 << 1) +#define UMAC_IRQ_PHY_DET_R (1 << 2) +#define UMAC_IRQ_PHY_DET_F (1 << 3) +#define UMAC_IRQ_LINK_UP (1 << 4) +#define UMAC_IRQ_LINK_DOWN (1 << 5) +#define UMAC_IRQ_UMAC (1 << 6) +#define UMAC_IRQ_UMAC_TSV (1 << 7) +#define 
UMAC_IRQ_TBUF_UNDERRUN (1 << 8) +#define UMAC_IRQ_RBUF_OVERFLOW (1 << 9) +#define UMAC_IRQ_HFB_SM (1 << 10) +#define UMAC_IRQ_HFB_MM (1 << 11) +#define UMAC_IRQ_MPD_R (1 << 12) +#define UMAC_IRQ_RXDMA_MBDONE (1 << 13) +#define UMAC_IRQ_RXDMA_PDONE (1 << 14) +#define UMAC_IRQ_RXDMA_BDONE (1 << 15) +#define UMAC_IRQ_TXDMA_MBDONE (1 << 16) +#define UMAC_IRQ_TXDMA_PDONE (1 << 17) +#define UMAC_IRQ_TXDMA_BDONE (1 << 18) +/* Only valid for GENETv3+ */ +#define UMAC_IRQ_MDIO_DONE (1 << 23) +#define UMAC_IRQ_MDIO_ERROR (1 << 24) + +/* Register block offsets */ +#define GENET_SYS_OFF 0x0000 +#define GENET_GR_BRIDGE_OFF 0x0040 +#define GENET_EXT_OFF 0x0080 +#define GENET_INTRL2_0_OFF 0x0200 +#define GENET_INTRL2_1_OFF 0x0240 +#define GENET_RBUF_OFF 0x0300 +#define GENET_UMAC_OFF 0x0800 + +/* SYS block offsets and register definitions */ +#define SYS_REV_CTRL 0x00 +#define SYS_PORT_CTRL 0x04 +#define PORT_MODE_INT_EPHY 0 +#define PORT_MODE_INT_GPHY 1 +#define PORT_MODE_EXT_EPHY 2 +#define PORT_MODE_EXT_GPHY 3 +#define PORT_MODE_EXT_RVMII_25 (4 | BIT(4)) +#define PORT_MODE_EXT_RVMII_50 4 +#define LED_ACT_SOURCE_MAC (1 << 9) + +#define SYS_RBUF_FLUSH_CTRL 0x08 +#define SYS_TBUF_FLUSH_CTRL 0x0C +#define RBUF_FLUSH_CTRL_V1 0x04 + +/* Ext block register offsets and definitions */ +#define EXT_EXT_PWR_MGMT 0x00 +#define EXT_PWR_DOWN_BIAS (1 << 0) +#define EXT_PWR_DOWN_DLL (1 << 1) +#define EXT_PWR_DOWN_PHY (1 << 2) +#define EXT_PWR_DN_EN_LD (1 << 3) +#define EXT_ENERGY_DET (1 << 4) +#define EXT_IDDQ_FROM_PHY (1 << 5) +#define EXT_PHY_RESET (1 << 8) +#define EXT_ENERGY_DET_MASK (1 << 12) + +#define EXT_RGMII_OOB_CTRL 0x0C +#define RGMII_MODE_EN (1 << 0) +#define RGMII_LINK (1 << 4) +#define OOB_DISABLE (1 << 5) +#define ID_MODE_DIS (1 << 16) + +#define EXT_GPHY_CTRL 0x1C +#define EXT_CFG_IDDQ_BIAS (1 << 0) +#define EXT_CFG_PWR_DOWN (1 << 1) +#define EXT_GPHY_RESET (1 << 5) + +/* DMA rings size */ +#define DMA_RING_SIZE (0x40) +#define DMA_RINGS_SIZE (DMA_RING_SIZE * (DESC_INDEX + 1)) + +/* DMA registers common definitions */ +#define DMA_RW_POINTER_MASK 0x1FF +#define DMA_P_INDEX_DISCARD_CNT_MASK 0xFFFF +#define DMA_P_INDEX_DISCARD_CNT_SHIFT 16 +#define DMA_BUFFER_DONE_CNT_MASK 0xFFFF +#define DMA_BUFFER_DONE_CNT_SHIFT 16 +#define DMA_P_INDEX_MASK 0xFFFF +#define DMA_C_INDEX_MASK 0xFFFF + +/* DMA ring size register */ +#define DMA_RING_SIZE_MASK 0xFFFF +#define DMA_RING_SIZE_SHIFT 16 +#define DMA_RING_BUFFER_SIZE_MASK 0xFFFF + +/* DMA interrupt threshold register */ +#define DMA_INTR_THRESHOLD_MASK 0x00FF + +/* DMA XON/XOFF register */ +#define DMA_XON_THREHOLD_MASK 0xFFFF +#define DMA_XOFF_THRESHOLD_MASK 0xFFFF +#define DMA_XOFF_THRESHOLD_SHIFT 16 + +/* DMA flow period register */ +#define DMA_FLOW_PERIOD_MASK 0xFFFF +#define DMA_MAX_PKT_SIZE_MASK 0xFFFF +#define DMA_MAX_PKT_SIZE_SHIFT 16 + + +/* DMA control register */ +#define DMA_EN (1 << 0) +#define DMA_RING_BUF_EN_SHIFT 0x01 +#define DMA_RING_BUF_EN_MASK 0xFFFF +#define DMA_TSB_SWAP_EN (1 << 20) + +/* DMA status register */ +#define DMA_DISABLED (1 << 0) +#define DMA_DESC_RAM_INIT_BUSY (1 << 1) + +/* DMA SCB burst size register */ +#define DMA_SCB_BURST_SIZE_MASK 0x1F + +/* DMA activity vector register */ +#define DMA_ACTIVITY_VECTOR_MASK 0x1FFFF + +/* DMA backpressure mask register */ +#define DMA_BACKPRESSURE_MASK 0x1FFFF +#define DMA_PFC_ENABLE (1 << 31) + +/* DMA backpressure status register */ +#define DMA_BACKPRESSURE_STATUS_MASK 0x1FFFF + +/* DMA override register */ +#define DMA_LITTLE_ENDIAN_MODE (1 << 0) +#define DMA_REGISTER_MODE (1 << 1) + 
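As a minimal illustrative sketch (editorial addition, not part of this commit; the helper name is hypothetical), the shift/mask pairs defined above are intended to be combined like this when decoding a packed register word, here the RX discard count carried in the upper half of a producer-index register:

static inline u32 genet_rx_discards(u32 p_index)
{
	/* The discard count sits in bits 31:16 of the producer index:
	 * shift it down, then mask off the 16-bit field.
	 */
	return (p_index >> DMA_P_INDEX_DISCARD_CNT_SHIFT) &
	       DMA_P_INDEX_DISCARD_CNT_MASK;
}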
+/* DMA timeout register */ +#define DMA_TIMEOUT_MASK 0xFFFF +#define DMA_TIMEOUT_VAL 5000 /* micro seconds */ + +/* TDMA rate limiting control register */ +#define DMA_RATE_LIMIT_EN_MASK 0xFFFF + +/* TDMA arbitration control register */ +#define DMA_ARBITER_MODE_MASK 0x03 +#define DMA_RING_BUF_PRIORITY_MASK 0x1F +#define DMA_RING_BUF_PRIORITY_SHIFT 5 +#define DMA_RATE_ADJ_MASK 0xFF + +/* Tx/Rx Dma Descriptor common bits*/ +#define DMA_BUFLENGTH_MASK 0x0fff +#define DMA_BUFLENGTH_SHIFT 16 +#define DMA_OWN 0x8000 +#define DMA_EOP 0x4000 +#define DMA_SOP 0x2000 +#define DMA_WRAP 0x1000 +/* Tx specific Dma descriptor bits */ +#define DMA_TX_UNDERRUN 0x0200 +#define DMA_TX_APPEND_CRC 0x0040 +#define DMA_TX_OW_CRC 0x0020 +#define DMA_TX_DO_CSUM 0x0010 +#define DMA_TX_QTAG_SHIFT 7 + +/* Rx Specific Dma descriptor bits */ +#define DMA_RX_CHK_V3PLUS 0x8000 +#define DMA_RX_CHK_V12 0x1000 +#define DMA_RX_BRDCAST 0x0040 +#define DMA_RX_MULT 0x0020 +#define DMA_RX_LG 0x0010 +#define DMA_RX_NO 0x0008 +#define DMA_RX_RXER 0x0004 +#define DMA_RX_CRC_ERROR 0x0002 +#define DMA_RX_OV 0x0001 +#define DMA_RX_FI_MASK 0x001F +#define DMA_RX_FI_SHIFT 0x0007 +#define DMA_DESC_ALLOC_MASK 0x00FF + +#define DMA_ARBITER_RR 0x00 +#define DMA_ARBITER_WRR 0x01 +#define DMA_ARBITER_SP 0x02 + +struct enet_cb { + struct sk_buff *skb; + void __iomem *bd_addr; + DEFINE_DMA_UNMAP_ADDR(dma_addr); + DEFINE_DMA_UNMAP_LEN(dma_len); +}; + +/* power management mode */ +enum bcmgenet_power_mode { + GENET_POWER_CABLE_SENSE = 0, + GENET_POWER_PASSIVE, +}; + +struct bcmgenet_priv; + +/* We support both runtime GENET detection and compile-time + * to optimize code-paths for a given hardware + */ +enum bcmgenet_version { + GENET_V1 = 1, + GENET_V2, + GENET_V3, + GENET_V4 +}; + +#define GENET_IS_V1(p) ((p)->version == GENET_V1) +#define GENET_IS_V2(p) ((p)->version == GENET_V2) +#define GENET_IS_V3(p) ((p)->version == GENET_V3) +#define GENET_IS_V4(p) ((p)->version == GENET_V4) + +/* Hardware flags */ +#define GENET_HAS_40BITS (1 << 0) +#define GENET_HAS_EXT (1 << 1) +#define GENET_HAS_MDIO_INTR (1 << 2) + +/* BCMGENET hardware parameters, keep this structure nicely aligned + * since it is going to be used in hot paths + */ +struct bcmgenet_hw_params { + u8 tx_queues; + u8 rx_queues; + u8 bds_cnt; + u8 bp_in_en_shift; + u32 bp_in_mask; + u8 hfb_filter_cnt; + u8 qtag_mask; + u16 tbuf_offset; + u32 hfb_offset; + u32 hfb_reg_offset; + u32 rdma_offset; + u32 tdma_offset; + u32 words_per_bd; + u32 flags; +}; + +struct bcmgenet_tx_ring { + spinlock_t lock; /* ring lock */ + unsigned int index; /* ring index */ + unsigned int queue; /* queue index */ + struct enet_cb *cbs; /* tx ring buffer control block*/ + unsigned int size; /* size of each tx ring */ + unsigned int c_index; /* last consumer index of each ring*/ + unsigned int free_bds; /* # of free bds for each ring */ + unsigned int write_ptr; /* Tx ring write pointer SW copy */ + unsigned int prod_index; /* Tx ring producer index SW copy */ + unsigned int cb_ptr; /* Tx ring initial CB ptr */ + unsigned int end_ptr; /* Tx ring end CB ptr */ + void (*int_enable)(struct bcmgenet_priv *priv, + struct bcmgenet_tx_ring *); + void (*int_disable)(struct bcmgenet_priv *priv, + struct bcmgenet_tx_ring *); +}; + +/* device context */ +struct bcmgenet_priv { + void __iomem *base; + enum bcmgenet_version version; + struct net_device *dev; + u32 int0_mask; + u32 int1_mask; + + /* NAPI for descriptor based rx */ + struct napi_struct napi ____cacheline_aligned; + + /* transmit variables */ + void __iomem 
*tx_bds; + struct enet_cb *tx_cbs; + unsigned int num_tx_bds; + + struct bcmgenet_tx_ring tx_rings[DESC_INDEX + 1]; + + /* receive variables */ + void __iomem *rx_bds; + void __iomem *rx_bd_assign_ptr; + int rx_bd_assign_index; + struct enet_cb *rx_cbs; + unsigned int num_rx_bds; + unsigned int rx_buf_len; + unsigned int rx_read_ptr; + unsigned int rx_c_index; + + /* other misc variables */ + struct bcmgenet_hw_params *hw_params; + + /* MDIO bus variables */ + wait_queue_head_t wq; + struct phy_device *phydev; + struct device_node *phy_dn; + struct mii_bus *mii_bus; + + /* PHY device variables */ + int old_duplex; + int old_link; + int old_pause; + phy_interface_t phy_interface; + int phy_addr; + int ext_phy; + + /* Interrupt variables */ + struct work_struct bcmgenet_irq_work; + int irq0; + int irq1; + unsigned int irq0_stat; + unsigned int irq1_stat; + + /* HW descriptors/checksum variables */ + bool desc_64b_en; + bool desc_rxchk_en; + bool crc_fwd_en; + + unsigned int dma_rx_chk_bit; + + u32 msg_enable; + + struct clk *clk; + struct platform_device *pdev; + + /* WOL */ + unsigned long wol_enabled; + struct clk *clk_wol; + u32 wolopts; + + struct bcmgenet_mib_counters mib; +}; + +#define GENET_IO_MACRO(name, offset) \ +static inline u32 bcmgenet_##name##_readl(struct bcmgenet_priv *priv, \ + u32 off) \ +{ \ + return __raw_readl(priv->base + offset + off); \ +} \ +static inline void bcmgenet_##name##_writel(struct bcmgenet_priv *priv, \ + u32 val, u32 off) \ +{ \ + __raw_writel(val, priv->base + offset + off); \ +} + +GENET_IO_MACRO(ext, GENET_EXT_OFF); +GENET_IO_MACRO(umac, GENET_UMAC_OFF); +GENET_IO_MACRO(sys, GENET_SYS_OFF); + +/* interrupt l2 registers accessors */ +GENET_IO_MACRO(intrl2_0, GENET_INTRL2_0_OFF); +GENET_IO_MACRO(intrl2_1, GENET_INTRL2_1_OFF); + +/* HFB register accessors */ +GENET_IO_MACRO(hfb, priv->hw_params->hfb_offset); + +/* GENET v2+ HFB control and filter len helpers */ +GENET_IO_MACRO(hfb_reg, priv->hw_params->hfb_reg_offset); + +/* RBUF register accessors */ +GENET_IO_MACRO(rbuf, GENET_RBUF_OFF); + +/* MDIO routines */ +int bcmgenet_mii_init(struct net_device *dev); +int bcmgenet_mii_config(struct net_device *dev); +void bcmgenet_mii_exit(struct net_device *dev); +void bcmgenet_mii_reset(struct net_device *dev); + +#endif /* __BCMGENET_H__ */ diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c new file mode 100644 index 000000000000..4608673beaff --- /dev/null +++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c @@ -0,0 +1,464 @@ +/* + * Broadcom GENET MDIO routines + * + * Copyright (c) 2014 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ + + +#include <linux/types.h> +#include <linux/delay.h> +#include <linux/wait.h> +#include <linux/mii.h> +#include <linux/ethtool.h> +#include <linux/bitops.h> +#include <linux/netdevice.h> +#include <linux/platform_device.h> +#include <linux/phy.h> +#include <linux/phy_fixed.h> +#include <linux/brcmphy.h> +#include <linux/of.h> +#include <linux/of_net.h> +#include <linux/of_mdio.h> + +#include "bcmgenet.h" + +/* read a value from the MII */ +static int bcmgenet_mii_read(struct mii_bus *bus, int phy_id, int location) +{ + int ret; + struct net_device *dev = bus->priv; + struct bcmgenet_priv *priv = netdev_priv(dev); + u32 reg; + + bcmgenet_umac_writel(priv, (MDIO_RD | (phy_id << MDIO_PMD_SHIFT) | + (location << MDIO_REG_SHIFT)), UMAC_MDIO_CMD); + /* Start MDIO transaction*/ + reg = bcmgenet_umac_readl(priv, UMAC_MDIO_CMD); + reg |= MDIO_START_BUSY; + bcmgenet_umac_writel(priv, reg, UMAC_MDIO_CMD); + wait_event_timeout(priv->wq, + !(bcmgenet_umac_readl(priv, UMAC_MDIO_CMD) + & MDIO_START_BUSY), + HZ / 100); + ret = bcmgenet_umac_readl(priv, UMAC_MDIO_CMD); + + if (ret & MDIO_READ_FAIL) + return -EIO; + + return ret & 0xffff; +} + +/* write a value to the MII */ +static int bcmgenet_mii_write(struct mii_bus *bus, int phy_id, + int location, u16 val) +{ + struct net_device *dev = bus->priv; + struct bcmgenet_priv *priv = netdev_priv(dev); + u32 reg; + + bcmgenet_umac_writel(priv, (MDIO_WR | (phy_id << MDIO_PMD_SHIFT) | + (location << MDIO_REG_SHIFT) | (0xffff & val)), + UMAC_MDIO_CMD); + reg = bcmgenet_umac_readl(priv, UMAC_MDIO_CMD); + reg |= MDIO_START_BUSY; + bcmgenet_umac_writel(priv, reg, UMAC_MDIO_CMD); + wait_event_timeout(priv->wq, + !(bcmgenet_umac_readl(priv, UMAC_MDIO_CMD) & + MDIO_START_BUSY), + HZ / 100); + + return 0; +} + +/* setup netdev link state when PHY link status change and + * update UMAC and RGMII block when link up + */ +static void bcmgenet_mii_setup(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + struct phy_device *phydev = priv->phydev; + u32 reg, cmd_bits = 0; + unsigned int status_changed = 0; + + if (priv->old_link != phydev->link) { + status_changed = 1; + priv->old_link = phydev->link; + } + + if (phydev->link) { + /* program UMAC and RGMII block based on established link + * speed, pause, and duplex. + * the speed set in umac->cmd tell RGMII block which clock + * 25MHz(100Mbps)/125MHz(1Gbps) to use for transmit. + * receive clock is provided by PHY. 
+ */ + reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL); + reg &= ~OOB_DISABLE; + reg |= RGMII_LINK; + bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL); + + /* speed */ + if (phydev->speed == SPEED_1000) + cmd_bits = UMAC_SPEED_1000; + else if (phydev->speed == SPEED_100) + cmd_bits = UMAC_SPEED_100; + else + cmd_bits = UMAC_SPEED_10; + cmd_bits <<= CMD_SPEED_SHIFT; + + if (priv->old_duplex != phydev->duplex) { + status_changed = 1; + priv->old_duplex = phydev->duplex; + } + + /* duplex */ + if (phydev->duplex != DUPLEX_FULL) + cmd_bits |= CMD_HD_EN; + + if (priv->old_pause != phydev->pause) { + status_changed = 1; + priv->old_pause = phydev->pause; + } + + /* pause capability */ + if (!phydev->pause) + cmd_bits |= CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE; + + reg = bcmgenet_umac_readl(priv, UMAC_CMD); + reg &= ~((CMD_SPEED_MASK << CMD_SPEED_SHIFT) | + CMD_HD_EN | + CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE); + reg |= cmd_bits; + bcmgenet_umac_writel(priv, reg, UMAC_CMD); + } + + if (status_changed) + phy_print_status(phydev); +} + +void bcmgenet_mii_reset(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + + if (priv->phydev) { + phy_init_hw(priv->phydev); + phy_start_aneg(priv->phydev); + } +} + +static void bcmgenet_ephy_power_up(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + u32 reg = 0; + + /* EXT_GPHY_CTRL is only valid for GENETv4 and onward */ + if (!GENET_IS_V4(priv)) + return; + + reg = bcmgenet_ext_readl(priv, EXT_GPHY_CTRL); + reg &= ~(EXT_CFG_IDDQ_BIAS | EXT_CFG_PWR_DOWN); + reg |= EXT_GPHY_RESET; + bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL); + mdelay(2); + + reg &= ~EXT_GPHY_RESET; + bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL); + udelay(20); +} + +static void bcmgenet_internal_phy_setup(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + u32 reg; + + /* Power up EPHY */ + bcmgenet_ephy_power_up(dev); + /* enable APD */ + reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT); + reg |= EXT_PWR_DN_EN_LD; + bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT); + bcmgenet_mii_reset(dev); +} + +static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv) +{ + u32 reg; + + /* Speed settings are set in bcmgenet_mii_setup() */ + reg = bcmgenet_sys_readl(priv, SYS_PORT_CTRL); + reg |= LED_ACT_SOURCE_MAC; + bcmgenet_sys_writel(priv, reg, SYS_PORT_CTRL); +} + +int bcmgenet_mii_config(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + struct phy_device *phydev = priv->phydev; + struct device *kdev = &priv->pdev->dev; + const char *phy_name = NULL; + u32 id_mode_dis = 0; + u32 port_ctrl; + u32 reg; + + priv->ext_phy = !phy_is_internal(priv->phydev) && + (priv->phy_interface != PHY_INTERFACE_MODE_MOCA); + + if (phy_is_internal(priv->phydev)) + priv->phy_interface = PHY_INTERFACE_MODE_NA; + + switch (priv->phy_interface) { + case PHY_INTERFACE_MODE_NA: + case PHY_INTERFACE_MODE_MOCA: + /* Irrespective of the actually configured PHY speed (100 or + * 1000) GENETv4 only has an internal GPHY so we will just end + * up masking the Gigabit features from what we support, not + * switching to the EPHY + */ + if (GENET_IS_V4(priv)) + port_ctrl = PORT_MODE_INT_GPHY; + else + port_ctrl = PORT_MODE_INT_EPHY; + + bcmgenet_sys_writel(priv, port_ctrl, SYS_PORT_CTRL); + + if (phy_is_internal(priv->phydev)) { + phy_name = "internal PHY"; + bcmgenet_internal_phy_setup(dev); + } else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) { + phy_name = "MoCA"; + bcmgenet_moca_phy_setup(priv); + } + 
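/* (Editorial note, not part of the original patch: both branches above
 * only select the port multiplexer via SYS_PORT_CTRL; speed and duplex
 * are programmed later by bcmgenet_mii_setup() once the link comes up.)
 */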
break; + + case PHY_INTERFACE_MODE_MII: + phy_name = "external MII"; + phydev->supported &= PHY_BASIC_FEATURES; + bcmgenet_sys_writel(priv, + PORT_MODE_EXT_EPHY, SYS_PORT_CTRL); + break; + + case PHY_INTERFACE_MODE_REVMII: + phy_name = "external RvMII"; + /* of_mdiobus_register took care of reading the 'max-speed' + * PHY property for us, effectively limiting the PHY supported + * capabilities, use that knowledge to also configure the + * Reverse MII interface correctly. + */ + if ((priv->phydev->supported & PHY_BASIC_FEATURES) == + PHY_BASIC_FEATURES) + port_ctrl = PORT_MODE_EXT_RVMII_25; + else + port_ctrl = PORT_MODE_EXT_RVMII_50; + bcmgenet_sys_writel(priv, port_ctrl, SYS_PORT_CTRL); + break; + + case PHY_INTERFACE_MODE_RGMII: + /* RGMII_NO_ID: TXC transitions at the same time as TXD + * (requires PCB or receiver-side delay) + * RGMII: Add 2ns delay on TXC (90 degree shift) + * + * ID is implicitly disabled for 100Mbps (RG)MII operation. + */ + id_mode_dis = BIT(16); + /* fall through */ + case PHY_INTERFACE_MODE_RGMII_TXID: + if (id_mode_dis) + phy_name = "external RGMII (no delay)"; + else + phy_name = "external RGMII (TX delay)"; + reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL); + reg |= RGMII_MODE_EN | id_mode_dis; + bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL); + bcmgenet_sys_writel(priv, + PORT_MODE_EXT_GPHY, SYS_PORT_CTRL); + break; + default: + dev_err(kdev, "unknown phy mode: %d\n", priv->phy_interface); + return -EINVAL; + } + + dev_info(kdev, "configuring instance for %s\n", phy_name); + + return 0; +} + +static int bcmgenet_mii_probe(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + struct phy_device *phydev; + unsigned int phy_flags; + int ret; + + if (priv->phydev) { + pr_info("PHY already attached\n"); + return 0; + } + + if (priv->phy_dn) + phydev = of_phy_connect(dev, priv->phy_dn, + bcmgenet_mii_setup, 0, + priv->phy_interface); + else + phydev = of_phy_connect_fixed_link(dev, + bcmgenet_mii_setup, + priv->phy_interface); + + if (!phydev) { + pr_err("could not attach to PHY\n"); + return -ENODEV; + } + + priv->old_link = -1; + priv->old_duplex = -1; + priv->old_pause = -1; + priv->phydev = phydev; + + /* Configure port multiplexer based on what the probed PHY device since + * reading the 'max-speed' property determines the maximum supported + * PHY speed which is needed for bcmgenet_mii_config() to configure + * things appropriately. 
+ */ + ret = bcmgenet_mii_config(dev); + if (ret) { + phy_disconnect(priv->phydev); + return ret; + } + + phy_flags = PHY_BRCM_100MBPS_WAR; + + /* workarounds are only needed for 100Mpbs PHYs, and + * never on GENET V1 hardware + */ + if ((phydev->supported & PHY_GBIT_FEATURES) || GENET_IS_V1(priv)) + phy_flags = 0; + + phydev->dev_flags |= phy_flags; + phydev->advertising = phydev->supported; + + /* The internal PHY has its link interrupts routed to the + * Ethernet MAC ISRs + */ + if (phy_is_internal(priv->phydev)) + priv->mii_bus->irq[phydev->addr] = PHY_IGNORE_INTERRUPT; + else + priv->mii_bus->irq[phydev->addr] = PHY_POLL; + + pr_info("attached PHY at address %d [%s]\n", + phydev->addr, phydev->drv->name); + + return 0; +} + +static int bcmgenet_mii_alloc(struct bcmgenet_priv *priv) +{ + struct mii_bus *bus; + + if (priv->mii_bus) + return 0; + + priv->mii_bus = mdiobus_alloc(); + if (!priv->mii_bus) { + pr_err("failed to allocate\n"); + return -ENOMEM; + } + + bus = priv->mii_bus; + bus->priv = priv->dev; + bus->name = "bcmgenet MII bus"; + bus->parent = &priv->pdev->dev; + bus->read = bcmgenet_mii_read; + bus->write = bcmgenet_mii_write; + snprintf(bus->id, MII_BUS_ID_SIZE, "%s-%d", + priv->pdev->name, priv->pdev->id); + + bus->irq = kzalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); + if (!bus->irq) { + mdiobus_free(priv->mii_bus); + return -ENOMEM; + } + + return 0; +} + +static int bcmgenet_mii_of_init(struct bcmgenet_priv *priv) +{ + struct device_node *dn = priv->pdev->dev.of_node; + struct device *kdev = &priv->pdev->dev; + struct device_node *mdio_dn; + char *compat; + int ret; + + compat = kasprintf(GFP_KERNEL, "brcm,genet-mdio-v%d", priv->version); + if (!compat) + return -ENOMEM; + + mdio_dn = of_find_compatible_node(dn, NULL, compat); + kfree(compat); + if (!mdio_dn) { + dev_err(kdev, "unable to find MDIO bus node\n"); + return -ENODEV; + } + + ret = of_mdiobus_register(priv->mii_bus, mdio_dn); + if (ret) { + dev_err(kdev, "failed to register MDIO bus\n"); + return ret; + } + + /* Fetch the PHY phandle */ + priv->phy_dn = of_parse_phandle(dn, "phy-handle", 0); + + /* Get the link mode */ + priv->phy_interface = of_get_phy_mode(dn); + + return 0; +} + +int bcmgenet_mii_init(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + int ret; + + ret = bcmgenet_mii_alloc(priv); + if (ret) + return ret; + + ret = bcmgenet_mii_of_init(priv); + if (ret) + goto out_free; + + ret = bcmgenet_mii_probe(dev); + if (ret) + goto out; + + return 0; + +out: + mdiobus_unregister(priv->mii_bus); +out_free: + kfree(priv->mii_bus->irq); + mdiobus_free(priv->mii_bus); + return ret; +} + +void bcmgenet_mii_exit(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + + mdiobus_unregister(priv->mii_bus); + kfree(priv->mii_bus->irq); + mdiobus_free(priv->mii_bus); +} diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 3b6d0ba86c71..b9f7022f4e81 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -1401,11 +1401,6 @@ static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val) return ret; } -static int tg3_mdio_reset(struct mii_bus *bp) -{ - return 0; -} - static void tg3_mdio_config_5785(struct tg3 *tp) { u32 val; @@ -1542,7 +1537,6 @@ static int tg3_mdio_init(struct tg3 *tp) tp->mdio_bus->parent = &tp->pdev->dev; tp->mdio_bus->read = &tg3_mdio_read; tp->mdio_bus->write = &tg3_mdio_write; - tp->mdio_bus->reset = &tg3_mdio_reset; tp->mdio_bus->phy_mask = ~(1 << 
tp->phy_addr); tp->mdio_bus->irq = &tp->mdio_irq[0]; @@ -6322,6 +6316,7 @@ static const struct ptp_clock_info tg3_ptp_caps = { .n_alarm = 0, .n_ext_ts = 0, .n_per_out = 1, + .n_pins = 0, .pps = 0, .adjfreq = tg3_ptp_adjfreq, .adjtime = tg3_ptp_adjtime, @@ -6593,7 +6588,7 @@ static void tg3_tx(struct tg3_napi *tnapi) pkts_compl++; bytes_compl += skb->len; - dev_kfree_skb(skb); + dev_kfree_skb_any(skb); if (unlikely(tx_bug)) { tg3_tx_recover(tp); @@ -6924,7 +6919,7 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget) if (len > (tp->dev->mtu + ETH_HLEN) && skb->protocol != htons(ETH_P_8021Q)) { - dev_kfree_skb(skb); + dev_kfree_skb_any(skb); goto drop_it_no_recycle; } @@ -7807,7 +7802,7 @@ static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi, PCI_DMA_TODEVICE); /* Make sure the mapping succeeded */ if (pci_dma_mapping_error(tp->pdev, new_addr)) { - dev_kfree_skb(new_skb); + dev_kfree_skb_any(new_skb); ret = -1; } else { u32 save_entry = *entry; @@ -7822,13 +7817,13 @@ static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi, new_skb->len, base_flags, mss, vlan)) { tg3_tx_skb_unmap(tnapi, save_entry, -1); - dev_kfree_skb(new_skb); + dev_kfree_skb_any(new_skb); ret = -1; } } } - dev_kfree_skb(skb); + dev_kfree_skb_any(skb); *pskb = new_skb; return ret; } @@ -7871,7 +7866,7 @@ static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb) } while (segs); tg3_tso_bug_end: - dev_kfree_skb(skb); + dev_kfree_skb_any(skb); return NETDEV_TX_OK; } @@ -7923,8 +7918,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) struct iphdr *iph; u32 tcp_opt_len, hdr_len; - if (skb_header_cloned(skb) && - pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) + if (skb_cow_head(skb, 0)) goto drop; iph = ip_hdr(skb); @@ -8093,7 +8087,7 @@ dma_error: tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i); tnapi->tx_buffers[tnapi->tx_prod].skb = NULL; drop: - dev_kfree_skb(skb); + dev_kfree_skb_any(skb); drop_nofree: tp->tx_dropped++; return NETDEV_TX_OK; @@ -11361,12 +11355,10 @@ static bool tg3_enable_msix(struct tg3 *tp) msix_ent[i].vector = 0; } - rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt); + rc = pci_enable_msix_range(tp->pdev, msix_ent, 1, tp->irq_cnt); if (rc < 0) { return false; - } else if (rc != 0) { - if (pci_enable_msix(tp->pdev, msix_ent, rc)) - return false; + } else if (rc < tp->irq_cnt) { netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n", tp->irq_cnt, rc); tp->irq_cnt = rc; @@ -17649,8 +17641,6 @@ static int tg3_init_one(struct pci_dev *pdev, tg3_init_bufmgr_config(tp); - features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; - /* 5700 B0 chips do not support checksumming correctly due * to hardware bugs. 
*/ @@ -17682,7 +17672,8 @@ static int tg3_init_one(struct pci_dev *pdev, features |= NETIF_F_TSO_ECN; } - dev->features |= features; + dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX; dev->vlan_features |= features; /* diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c index 1803c3959044..354ae9792bad 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c +++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c @@ -1704,7 +1704,7 @@ bfa_flash_sem_get(void __iomem *bar) while (!bfa_raw_sem_get(bar)) { if (--n <= 0) return BFA_STATUS_BADFLASH; - udelay(10000); + mdelay(10); } return BFA_STATUS_OK; } diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c index 4ad1187e82fb..675550fe8ee9 100644 --- a/drivers/net/ethernet/brocade/bna/bnad.c +++ b/drivers/net/ethernet/brocade/bna/bnad.c @@ -2496,12 +2496,10 @@ bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb) { int err; - if (skb_header_cloned(skb)) { - err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); - if (err) { - BNAD_UPDATE_CTR(bnad, tso_err); - return err; - } + err = skb_cow_head(skb, 0); + if (err < 0) { + BNAD_UPDATE_CTR(bnad, tso_err); + return err; } /* @@ -2669,9 +2667,11 @@ bnad_enable_msix(struct bnad *bnad) for (i = 0; i < bnad->msix_num; i++) bnad->msix_table[i].entry = i; - ret = pci_enable_msix(bnad->pcidev, bnad->msix_table, bnad->msix_num); - if (ret > 0) { - /* Not enough MSI-X vectors. */ + ret = pci_enable_msix_range(bnad->pcidev, bnad->msix_table, + 1, bnad->msix_num); + if (ret < 0) { + goto intx_mode; + } else if (ret < bnad->msix_num) { pr_warn("BNA: %d MSI-X vectors allocated < %d requested\n", ret, bnad->msix_num); @@ -2684,18 +2684,11 @@ bnad_enable_msix(struct bnad *bnad) bnad->msix_num = BNAD_NUM_TXQ + BNAD_NUM_RXP + BNAD_MAILBOX_MSIX_VECTORS; - if (bnad->msix_num > ret) + if (bnad->msix_num > ret) { + pci_disable_msix(bnad->pcidev); goto intx_mode; - - /* Try once more with adjusted numbers */ - /* If this fails, fall back to INTx */ - ret = pci_enable_msix(bnad->pcidev, bnad->msix_table, - bnad->msix_num); - if (ret) - goto intx_mode; - - } else if (ret < 0) - goto intx_mode; + } + } pci_intx(bnad->pcidev, 0); @@ -2850,13 +2843,11 @@ bnad_txq_wi_prepare(struct bnad *bnad, struct bna_tcb *tcb, } if (unlikely((gso_size + skb_transport_offset(skb) + tcp_hdrlen(skb)) >= skb->len)) { - txqent->hdr.wi.opcode = - __constant_htons(BNA_TXQ_WI_SEND); + txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND); txqent->hdr.wi.lso_mss = 0; BNAD_UPDATE_CTR(bnad, tx_skb_tso_too_short); } else { - txqent->hdr.wi.opcode = - __constant_htons(BNA_TXQ_WI_SEND_LSO); + txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND_LSO); txqent->hdr.wi.lso_mss = htons(gso_size); } @@ -2870,7 +2861,7 @@ bnad_txq_wi_prepare(struct bnad *bnad, struct bna_tcb *tcb, htons(BNA_TXQ_WI_L4_HDR_N_OFFSET( tcp_hdrlen(skb) >> 2, skb_transport_offset(skb))); } else { - txqent->hdr.wi.opcode = __constant_htons(BNA_TXQ_WI_SEND); + txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND); txqent->hdr.wi.lso_mss = 0; if (unlikely(skb->len > (bnad->netdev->mtu + ETH_HLEN))) { @@ -2881,11 +2872,10 @@ bnad_txq_wi_prepare(struct bnad *bnad, struct bna_tcb *tcb, if (skb->ip_summed == CHECKSUM_PARTIAL) { u8 proto = 0; - if (skb->protocol == __constant_htons(ETH_P_IP)) + if (skb->protocol == htons(ETH_P_IP)) proto = ip_hdr(skb)->protocol; #ifdef NETIF_F_IPV6_CSUM - else if (skb->protocol == - __constant_htons(ETH_P_IPV6)) { + else if (skb->protocol == htons(ETH_P_IPV6)) { /* 
nexthdr may not be TCP immediately. */ proto = ipv6_hdr(skb)->nexthdr; } @@ -2954,17 +2944,17 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev) /* Sanity checks for the skb */ if (unlikely(skb->len <= ETH_HLEN)) { - dev_kfree_skb(skb); + dev_kfree_skb_any(skb); BNAD_UPDATE_CTR(bnad, tx_skb_too_short); return NETDEV_TX_OK; } if (unlikely(len > BFI_TX_MAX_DATA_PER_VECTOR)) { - dev_kfree_skb(skb); + dev_kfree_skb_any(skb); BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero); return NETDEV_TX_OK; } if (unlikely(len == 0)) { - dev_kfree_skb(skb); + dev_kfree_skb_any(skb); BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero); return NETDEV_TX_OK; } @@ -2976,7 +2966,7 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev) * and the netif_tx_stop_all_queues() call. */ if (unlikely(!tcb || !test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) { - dev_kfree_skb(skb); + dev_kfree_skb_any(skb); BNAD_UPDATE_CTR(bnad, tx_skb_stopping); return NETDEV_TX_OK; } @@ -2989,7 +2979,7 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev) wis = BNA_TXQ_WI_NEEDED(vectors); /* 4 vectors per work item */ if (unlikely(vectors > BFI_TX_MAX_VECTORS_PER_PKT)) { - dev_kfree_skb(skb); + dev_kfree_skb_any(skb); BNAD_UPDATE_CTR(bnad, tx_skb_max_vectors); return NETDEV_TX_OK; } @@ -3029,7 +3019,7 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev) /* Program the opcode, flags, frame_len, num_vectors in WI */ if (bnad_txq_wi_prepare(bnad, tcb, skb, txqent)) { - dev_kfree_skb(skb); + dev_kfree_skb_any(skb); return NETDEV_TX_OK; } txqent->hdr.wi.reserved = 0; @@ -3055,7 +3045,7 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev) /* Undo the changes starting at tcb->producer_index */ bnad_tx_buff_unmap(bnad, unmap_q, q_depth, tcb->producer_index); - dev_kfree_skb(skb); + dev_kfree_skb_any(skb); BNAD_UPDATE_CTR(bnad, tx_skb_frag_zero); return NETDEV_TX_OK; } @@ -3067,8 +3057,7 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev) vect_id = 0; BNA_QE_INDX_INC(prod, q_depth); txqent = &((struct bna_txq_entry *)tcb->sw_q)[prod]; - txqent->hdr.wi_ext.opcode = - __constant_htons(BNA_TXQ_WI_EXTENSION); + txqent->hdr.wi_ext.opcode = htons(BNA_TXQ_WI_EXTENSION); unmap = &unmap_q[prod]; } @@ -3085,7 +3074,7 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev) if (unlikely(len != skb->len)) { /* Undo the changes starting at tcb->producer_index */ bnad_tx_buff_unmap(bnad, unmap_q, q_depth, tcb->producer_index); - dev_kfree_skb(skb); + dev_kfree_skb_any(skb); BNAD_UPDATE_CTR(bnad, tx_skb_len_mismatch); return NETDEV_TX_OK; } diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c index 3190d38e16fb..ca97005e24b4 100644 --- a/drivers/net/ethernet/cadence/macb.c +++ b/drivers/net/ethernet/cadence/macb.c @@ -199,11 +199,6 @@ static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum, return 0; } -static int macb_mdio_reset(struct mii_bus *bus) -{ - return 0; -} - /** * macb_set_tx_clk() - Set a clock to a new frequency * @clk Pointer to the clock to change @@ -375,7 +370,6 @@ int macb_mii_init(struct macb *bp) bp->mii_bus->name = "MACB_mii_bus"; bp->mii_bus->read = &macb_mdio_read; bp->mii_bus->write = &macb_mdio_write; - bp->mii_bus->reset = &macb_mdio_reset; snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", bp->pdev->name, bp->pdev->id); bp->mii_bus->priv = bp; @@ -632,11 +626,16 @@ static void gem_rx_refill(struct macb *bp) "Unable to allocate sk_buff\n"); break; } - bp->rx_skbuff[entry] = skb; /* now fill corresponding 
descriptor entry */ paddr = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buffer_size, DMA_FROM_DEVICE); + if (dma_mapping_error(&bp->pdev->dev, paddr)) { + dev_kfree_skb(skb); + break; + } + + bp->rx_skbuff[entry] = skb; if (entry == RX_RING_SIZE - 1) paddr |= MACB_BIT(RX_WRAP); @@ -725,7 +724,7 @@ static int gem_rx(struct macb *bp, int budget) skb_put(skb, len); addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, addr)); dma_unmap_single(&bp->pdev->dev, addr, - len, DMA_FROM_DEVICE); + bp->rx_buffer_size, DMA_FROM_DEVICE); skb->protocol = eth_type_trans(skb, bp->dev); skb_checksum_none_assert(skb); @@ -1036,11 +1035,15 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev) } entry = macb_tx_ring_wrap(bp->tx_head); - bp->tx_head++; netdev_vdbg(bp->dev, "Allocated ring entry %u\n", entry); mapping = dma_map_single(&bp->pdev->dev, skb->data, len, DMA_TO_DEVICE); + if (dma_mapping_error(&bp->pdev->dev, mapping)) { + dev_kfree_skb_any(skb); + goto unlock; + } + bp->tx_head++; tx_skb = &bp->tx_skb[entry]; tx_skb->skb = skb; tx_skb->mapping = mapping; @@ -1066,6 +1069,7 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev) if (CIRC_SPACE(bp->tx_head, bp->tx_tail, TX_RING_SIZE) < 1) netif_stop_queue(dev); +unlock: spin_unlock_irqrestore(&bp->lock, flags); return NETDEV_TX_OK; diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c index d2a183c3a6ce..521dfea44b83 100644 --- a/drivers/net/ethernet/calxeda/xgmac.c +++ b/drivers/net/ethernet/calxeda/xgmac.c @@ -897,7 +897,7 @@ static void xgmac_tx_complete(struct xgmac_priv *priv) /* Check tx error on the last segment */ if (desc_get_tx_ls(p)) { desc_get_tx_status(priv, p); - dev_kfree_skb(skb); + dev_consume_skb_any(skb); } priv->tx_skbuff[entry] = NULL; @@ -1105,7 +1105,7 @@ static netdev_tx_t xgmac_xmit(struct sk_buff *skb, struct net_device *dev) len = skb_headlen(skb); paddr = dma_map_single(priv->device, skb->data, len, DMA_TO_DEVICE); if (dma_mapping_error(priv->device, paddr)) { - dev_kfree_skb(skb); + dev_kfree_skb_any(skb); return NETDEV_TX_OK; } priv->tx_skbuff[entry] = skb; @@ -1169,7 +1169,7 @@ dma_err: desc = first; dma_unmap_single(priv->device, desc_get_buf_addr(desc), desc_get_buf_len(desc), DMA_TO_DEVICE); - dev_kfree_skb(skb); + dev_kfree_skb_any(skb); return NETDEV_TX_OK; } diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c index 45d77334d7d9..07bbb711b7e5 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c +++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c @@ -3088,30 +3088,22 @@ static int cxgb_enable_msix(struct adapter *adap) { struct msix_entry entries[SGE_QSETS + 1]; int vectors; - int i, err; + int i; vectors = ARRAY_SIZE(entries); for (i = 0; i < vectors; ++i) entries[i].entry = i; - while ((err = pci_enable_msix(adap->pdev, entries, vectors)) > 0) - vectors = err; - - if (err < 0) - pci_disable_msix(adap->pdev); - - if (!err && vectors < (adap->params.nports + 1)) { - pci_disable_msix(adap->pdev); - err = -1; - } + vectors = pci_enable_msix_range(adap->pdev, entries, + adap->params.nports + 1, vectors); + if (vectors < 0) + return vectors; - if (!err) { - for (i = 0; i < vectors; ++i) - adap->msix_info[i].vec = entries[i].vector; - adap->msix_nvectors = vectors; - } + for (i = 0; i < vectors; ++i) + adap->msix_info[i].vec = entries[i].vector; + adap->msix_nvectors = vectors; - return err; + return 0; } static void print_port_info(struct adapter *adap, const struct adapter_info 
*ai) diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c index 632b318eb38a..8b069f96e920 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c @@ -298,7 +298,7 @@ static void free_tx_desc(struct adapter *adapter, struct sge_txq *q, if (need_unmap) unmap_skb(d->skb, q, cidx, pdev); if (d->eop) { - kfree_skb(d->skb); + dev_consume_skb_any(d->skb); d->skb = NULL; } } @@ -1188,7 +1188,7 @@ static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb, cpl->wr.wr_lo = htonl(V_WR_LEN(flits) | V_WR_GEN(gen) | V_WR_TID(q->token)); wr_gen2(d, gen); - kfree_skb(skb); + dev_consume_skb_any(skb); return; } @@ -1233,7 +1233,7 @@ netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev) * anything shorter than an Ethernet header. */ if (unlikely(skb->len < ETH_HLEN)) { - dev_kfree_skb(skb); + dev_kfree_skb_any(skb); return NETDEV_TX_OK; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index 1f4b9b30b9ed..32db37709263 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -66,6 +66,7 @@ enum { SERNUM_LEN = 24, /* Serial # length */ EC_LEN = 16, /* E/C length */ ID_LEN = 16, /* ID length */ + PN_LEN = 16, /* Part Number length */ }; enum { @@ -254,6 +255,7 @@ struct vpd_params { u8 ec[EC_LEN + 1]; u8 sn[SERNUM_LEN + 1]; u8 id[ID_LEN + 1]; + u8 pn[PN_LEN + 1]; }; struct pci_params { @@ -306,6 +308,7 @@ struct adapter_params { unsigned char bypass; unsigned int ofldq_wr_cred; + bool ulptx_memwrite_dsgl; /* use of T5 DSGL allowed */ }; #include "t4fw_api.h" @@ -497,6 +500,7 @@ struct sge_txq { spinlock_t db_lock; int db_disabled; unsigned short db_pidx; + unsigned short db_pidx_inc; u64 udb; }; @@ -553,8 +557,13 @@ struct sge { u32 pktshift; /* padding between CPL & packet data */ u32 fl_align; /* response queue message alignment */ u32 fl_starve_thres; /* Free List starvation threshold */ - unsigned int starve_thres; - u8 idma_state[2]; + + /* State variables for detecting an SGE Ingress DMA hang */ + unsigned int idma_1s_thresh;/* SGE same State Counter 1s threshold */ + unsigned int idma_stalled[2];/* SGE synthesized stalled timers in HZ */ + unsigned int idma_state[2]; /* SGE IDMA Hang detect state */ + unsigned int idma_qid[2]; /* SGE IDMA Hung Ingress Queue ID */ + unsigned int egr_start; unsigned int ingr_start; void *egr_map[MAX_EGRQ]; /* qid->queue egress queue map */ @@ -957,7 +966,7 @@ int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *parity); int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *parity); - +const char *t4_get_port_type_description(enum fw_port_type port_type); void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p); void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log); void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr, @@ -1029,4 +1038,5 @@ void t4_db_dropped(struct adapter *adapter); int t4_mem_win_read_len(struct adapter *adap, u32 addr, __be32 *data, int len); int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox, u32 addr, u32 val); +void t4_sge_decode_idma_state(struct adapter *adapter, int state); #endif /* __CXGB4_H__ */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 34e2488767d9..6fe58913403a 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ 
b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -254,6 +254,14 @@ static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = { CH_DEVICE(0x5011, 4), CH_DEVICE(0x5012, 4), CH_DEVICE(0x5013, 4), + CH_DEVICE(0x5014, 4), + CH_DEVICE(0x5015, 4), + CH_DEVICE(0x5080, 4), + CH_DEVICE(0x5081, 4), + CH_DEVICE(0x5082, 4), + CH_DEVICE(0x5083, 4), + CH_DEVICE(0x5084, 4), + CH_DEVICE(0x5085, 4), CH_DEVICE(0x5401, 4), CH_DEVICE(0x5402, 4), CH_DEVICE(0x5403, 4), @@ -273,6 +281,14 @@ static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = { CH_DEVICE(0x5411, 4), CH_DEVICE(0x5412, 4), CH_DEVICE(0x5413, 4), + CH_DEVICE(0x5414, 4), + CH_DEVICE(0x5415, 4), + CH_DEVICE(0x5480, 4), + CH_DEVICE(0x5481, 4), + CH_DEVICE(0x5482, 4), + CH_DEVICE(0x5483, 4), + CH_DEVICE(0x5484, 4), + CH_DEVICE(0x5485, 4), { 0, } }; @@ -423,15 +439,18 @@ static void link_report(struct net_device *dev) const struct port_info *p = netdev_priv(dev); switch (p->link_cfg.speed) { - case SPEED_10000: + case 10000: s = "10Gbps"; break; - case SPEED_1000: + case 1000: s = "1000Mbps"; break; - case SPEED_100: + case 100: s = "100Mbps"; break; + case 40000: + s = "40Gbps"; + break; } netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s, @@ -2061,7 +2080,7 @@ static void get_regs(struct net_device *dev, struct ethtool_regs *regs, 0x40200, 0x40298, 0x402ac, 0x4033c, 0x403f8, 0x403fc, - 0x41300, 0x413c4, + 0x41304, 0x413c4, 0x41400, 0x4141c, 0x41480, 0x414d0, 0x44000, 0x44078, @@ -2089,7 +2108,7 @@ static void get_regs(struct net_device *dev, struct ethtool_regs *regs, 0x48200, 0x48298, 0x482ac, 0x4833c, 0x483f8, 0x483fc, - 0x49300, 0x493c4, + 0x49304, 0x493c4, 0x49400, 0x4941c, 0x49480, 0x494d0, 0x4c000, 0x4c078, @@ -2199,6 +2218,8 @@ static unsigned int from_fw_linkcaps(unsigned int type, unsigned int caps) else if (type == FW_PORT_TYPE_FIBER_XFI || type == FW_PORT_TYPE_FIBER_XAUI || type == FW_PORT_TYPE_SFP) v |= SUPPORTED_FIBRE; + else if (type == FW_PORT_TYPE_BP40_BA) + v |= SUPPORTED_40000baseSR4_Full; if (caps & FW_PORT_CAP_ANEG) v |= SUPPORTED_Autoneg; @@ -2215,6 +2236,8 @@ static unsigned int to_fw_linkcaps(unsigned int caps) v |= FW_PORT_CAP_SPEED_1G; if (caps & ADVERTISED_10000baseT_Full) v |= FW_PORT_CAP_SPEED_10G; + if (caps & ADVERTISED_40000baseSR4_Full) + v |= FW_PORT_CAP_SPEED_40G; return v; } @@ -2263,12 +2286,14 @@ static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd) static unsigned int speed_to_caps(int speed) { - if (speed == SPEED_100) + if (speed == 100) return FW_PORT_CAP_SPEED_100M; - if (speed == SPEED_1000) + if (speed == 1000) return FW_PORT_CAP_SPEED_1G; - if (speed == SPEED_10000) + if (speed == 10000) return FW_PORT_CAP_SPEED_10G; + if (speed == 40000) + return FW_PORT_CAP_SPEED_40G; return 0; } @@ -2296,8 +2321,10 @@ static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd) if (cmd->autoneg == AUTONEG_DISABLE) { cap = speed_to_caps(speed); - if (!(lc->supported & cap) || (speed == SPEED_1000) || - (speed == SPEED_10000)) + if (!(lc->supported & cap) || + (speed == 1000) || + (speed == 10000) || + (speed == 40000)) return -EINVAL; lc->requested_speed = cap; lc->advertising = 0; @@ -3205,8 +3232,8 @@ static int cxgb4_clip_get(const struct net_device *dev, c.op_to_write = htonl(FW_CMD_OP(FW_CLIP_CMD) | FW_CMD_REQUEST | FW_CMD_WRITE); c.alloc_to_len16 = htonl(F_FW_CLIP_CMD_ALLOC | FW_LEN16(c)); - *(__be64 *)&c.ip_hi = *(__be64 *)(lip->s6_addr); - *(__be64 *)&c.ip_lo = *(__be64 *)(lip->s6_addr + 8); + c.ip_hi = *(__be64 *)(lip->s6_addr); + c.ip_lo = *(__be64 *)(lip->s6_addr + 8); return 
t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false); } @@ -3221,8 +3248,8 @@ static int cxgb4_clip_release(const struct net_device *dev, c.op_to_write = htonl(FW_CMD_OP(FW_CLIP_CMD) | FW_CMD_REQUEST | FW_CMD_READ); c.alloc_to_len16 = htonl(F_FW_CLIP_CMD_FREE | FW_LEN16(c)); - *(__be64 *)&c.ip_hi = *(__be64 *)(lip->s6_addr); - *(__be64 *)&c.ip_lo = *(__be64 *)(lip->s6_addr + 8); + c.ip_hi = *(__be64 *)(lip->s6_addr); + c.ip_lo = *(__be64 *)(lip->s6_addr + 8); return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false); } @@ -3563,14 +3590,25 @@ static void drain_db_fifo(struct adapter *adap, int usecs) static void disable_txq_db(struct sge_txq *q) { - spin_lock_irq(&q->db_lock); + unsigned long flags; + + spin_lock_irqsave(&q->db_lock, flags); q->db_disabled = 1; - spin_unlock_irq(&q->db_lock); + spin_unlock_irqrestore(&q->db_lock, flags); } -static void enable_txq_db(struct sge_txq *q) +static void enable_txq_db(struct adapter *adap, struct sge_txq *q) { spin_lock_irq(&q->db_lock); + if (q->db_pidx_inc) { + /* Make sure that all writes to the TX descriptors + * are committed before we tell HW about them. + */ + wmb(); + t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), + QID(q->cntxt_id) | PIDX(q->db_pidx_inc)); + q->db_pidx_inc = 0; + } q->db_disabled = 0; spin_unlock_irq(&q->db_lock); } @@ -3592,11 +3630,32 @@ static void enable_dbs(struct adapter *adap) int i; for_each_ethrxq(&adap->sge, i) - enable_txq_db(&adap->sge.ethtxq[i].q); + enable_txq_db(adap, &adap->sge.ethtxq[i].q); for_each_ofldrxq(&adap->sge, i) - enable_txq_db(&adap->sge.ofldtxq[i].q); + enable_txq_db(adap, &adap->sge.ofldtxq[i].q); for_each_port(adap, i) - enable_txq_db(&adap->sge.ctrlq[i].q); + enable_txq_db(adap, &adap->sge.ctrlq[i].q); +} + +static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd) +{ + if (adap->uld_handle[CXGB4_ULD_RDMA]) + ulds[CXGB4_ULD_RDMA].control(adap->uld_handle[CXGB4_ULD_RDMA], + cmd); +} + +static void process_db_full(struct work_struct *work) +{ + struct adapter *adap; + + adap = container_of(work, struct adapter, db_full_task); + + drain_db_fifo(adap, dbfifo_drain_delay); + enable_dbs(adap); + notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY); + t4_set_reg_field(adap, SGE_INT_ENABLE3, + DBFIFO_HP_INT | DBFIFO_LP_INT, + DBFIFO_HP_INT | DBFIFO_LP_INT); } static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q) @@ -3604,7 +3663,7 @@ static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q) u16 hw_pidx, hw_cidx; int ret; - spin_lock_bh(&q->db_lock); + spin_lock_irq(&q->db_lock); ret = read_eq_indices(adap, (u16)q->cntxt_id, &hw_pidx, &hw_cidx); if (ret) goto out; @@ -3621,7 +3680,8 @@ static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q) } out: q->db_disabled = 0; - spin_unlock_bh(&q->db_lock); + q->db_pidx_inc = 0; + spin_unlock_irq(&q->db_lock); if (ret) CH_WARN(adap, "DB drop recovery failed.\n"); } @@ -3637,29 +3697,6 @@ static void recover_all_queues(struct adapter *adap) sync_txq_pidx(adap, &adap->sge.ctrlq[i].q); } -static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd) -{ - mutex_lock(&uld_mutex); - if (adap->uld_handle[CXGB4_ULD_RDMA]) - ulds[CXGB4_ULD_RDMA].control(adap->uld_handle[CXGB4_ULD_RDMA], - cmd); - mutex_unlock(&uld_mutex); -} - -static void process_db_full(struct work_struct *work) -{ - struct adapter *adap; - - adap = container_of(work, struct adapter, db_full_task); - - notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL); - drain_db_fifo(adap, dbfifo_drain_delay); - t4_set_reg_field(adap, 
SGE_INT_ENABLE3, - DBFIFO_HP_INT | DBFIFO_LP_INT, - DBFIFO_HP_INT | DBFIFO_LP_INT); - notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY); -} - static void process_db_drop(struct work_struct *work) { struct adapter *adap; @@ -3667,11 +3704,13 @@ static void process_db_drop(struct work_struct *work) adap = container_of(work, struct adapter, db_drop_task); if (is_t4(adap->params.chip)) { - disable_dbs(adap); + drain_db_fifo(adap, dbfifo_drain_delay); notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP); - drain_db_fifo(adap, 1); + drain_db_fifo(adap, dbfifo_drain_delay); recover_all_queues(adap); + drain_db_fifo(adap, dbfifo_drain_delay); enable_dbs(adap); + notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY); } else { u32 dropped_db = t4_read_reg(adap, 0x010ac); u16 qid = (dropped_db >> 15) & 0x1ffff; @@ -3712,6 +3751,8 @@ static void process_db_drop(struct work_struct *work) void t4_db_full(struct adapter *adap) { if (is_t4(adap->params.chip)) { + disable_dbs(adap); + notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL); t4_set_reg_field(adap, SGE_INT_ENABLE3, DBFIFO_HP_INT | DBFIFO_LP_INT, 0); queue_work(workq, &adap->db_full_task); @@ -3720,8 +3761,11 @@ void t4_db_full(struct adapter *adap) void t4_db_dropped(struct adapter *adap) { - if (is_t4(adap->params.chip)) - queue_work(workq, &adap->db_drop_task); + if (is_t4(adap->params.chip)) { + disable_dbs(adap); + notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL); + } + queue_work(workq, &adap->db_drop_task); } static void uld_attach(struct adapter *adap, unsigned int uld) @@ -3765,6 +3809,7 @@ static void uld_attach(struct adapter *adap, unsigned int uld) lli.dbfifo_int_thresh = dbfifo_int_thresh; lli.sge_pktshift = adap->sge.pktshift; lli.enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN; + lli.ulptx_memwrite_dsgl = adap->params.ulptx_memwrite_dsgl; handle = ulds[uld].add(&lli); if (IS_ERR(handle)) { @@ -5370,6 +5415,21 @@ static int adap_init0(struct adapter *adap) (void) t4_set_params(adap, adap->mbox, adap->fn, 0, 1, params, val); /* + * Find out whether we're allowed to use the T5+ ULPTX MEMWRITE DSGL + * capability. Earlier versions of the firmware didn't have the + * ULPTX_MEMWRITE_DSGL so we'll interpret a query failure as no + * permission to use ULPTX MEMWRITE DSGL. + */ + if (is_t4(adap->params.chip)) { + adap->params.ulptx_memwrite_dsgl = false; + } else { + params[0] = FW_PARAM_DEV(ULPTX_MEMWRITE_DSGL); + ret = t4_query_params(adap, adap->mbox, adap->fn, 0, + 1, params, val); + adap->params.ulptx_memwrite_dsgl = (ret == 0 && val[0] != 0); + } + + /* * Get device capabilities so we can determine what resources we need * to manage. 
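 *
 * As an aside on the ULPTX_MEMWRITE_DSGL probe just above: querying an
 * optional firmware parameter and treating any failure as "feature not
 * supported" is the usual way to stay compatible with older firmware
 * that predates the parameter.  A minimal sketch of that idiom, using a
 * hypothetical wrapper (not a helper this driver actually defines):
 *
 *	static bool fw_param_enabled(struct adapter *adap, u32 param)
 *	{
 *		u32 val = 0;
 *
 *		return t4_query_params(adap, adap->mbox, adap->fn, 0,
 *				       1, &param, &val) == 0 && val != 0;
 *	}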
*/ @@ -5603,9 +5663,10 @@ static const struct pci_error_handlers cxgb4_eeh = { .resume = eeh_resume, }; -static inline bool is_10g_port(const struct link_config *lc) +static inline bool is_x_10g_port(const struct link_config *lc) { - return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0; + return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0 || + (lc->supported & FW_PORT_CAP_SPEED_40G) != 0; } static inline void init_rspq(struct sge_rspq *q, u8 timer_idx, u8 pkt_cnt_idx, @@ -5629,7 +5690,7 @@ static void cfg_queues(struct adapter *adap) int i, q10g = 0, n10g = 0, qidx = 0; for_each_port(adap, i) - n10g += is_10g_port(&adap2pinfo(adap, i)->link_cfg); + n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg); /* * We default to 1 queue per non-10G port and up to # of cores queues @@ -5644,7 +5705,7 @@ static void cfg_queues(struct adapter *adap) struct port_info *pi = adap2pinfo(adap, i); pi->first_qset = qidx; - pi->nqsets = is_10g_port(&pi->link_cfg) ? q10g : 1; + pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : 1; qidx += pi->nqsets; } @@ -5737,7 +5798,7 @@ static void reduce_ethqs(struct adapter *adap, int n) static int enable_msix(struct adapter *adap) { int ofld_need = 0; - int i, err, want, need; + int i, want, need; struct sge *s = &adap->sge; unsigned int nchan = adap->params.nports; struct msix_entry entries[MAX_INGQ + 1]; @@ -5753,32 +5814,30 @@ static int enable_msix(struct adapter *adap) } need = adap->params.nports + EXTRA_VECS + ofld_need; - while ((err = pci_enable_msix(adap->pdev, entries, want)) >= need) - want = err; + want = pci_enable_msix_range(adap->pdev, entries, need, want); + if (want < 0) + return want; - if (!err) { - /* - * Distribute available vectors to the various queue groups. - * Every group gets its minimum requirement and NIC gets top - * priority for leftovers. - */ - i = want - EXTRA_VECS - ofld_need; - if (i < s->max_ethqsets) { - s->max_ethqsets = i; - if (i < s->ethqsets) - reduce_ethqs(adap, i); - } - if (is_offload(adap)) { - i = want - EXTRA_VECS - s->max_ethqsets; - i -= ofld_need - nchan; - s->ofldqsets = (i / nchan) * nchan; /* round down */ - } - for (i = 0; i < want; ++i) - adap->msix_info[i].vec = entries[i].vector; - } else if (err > 0) - dev_info(adap->pdev_dev, - "only %d MSI-X vectors left, not using MSI-X\n", err); - return err; + /* + * Distribute available vectors to the various queue groups. + * Every group gets its minimum requirement and NIC gets top + * priority for leftovers. 
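 *
 * (The pci_enable_msix_range() call above is the tree-wide replacement
 * for the old "loop on pci_enable_msix() and retry with fewer vectors"
 * idiom: the caller states an acceptable [need, want] range once and
 * the PCI core handles the retrying.  A sketch of the pattern, assuming
 * an entries[] array already populated with vector indices:
 *
 *	int nvec = pci_enable_msix_range(adap->pdev, entries, need, want);
 *
 *	if (nvec < 0)
 *		return nvec;
 *
 * On success nvec lies somewhere in [need, want], and the driver then
 * sizes its queue groups to whatever it actually received.)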
+ */ + i = want - EXTRA_VECS - ofld_need; + if (i < s->max_ethqsets) { + s->max_ethqsets = i; + if (i < s->ethqsets) + reduce_ethqs(adap, i); + } + if (is_offload(adap)) { + i = want - EXTRA_VECS - s->max_ethqsets; + i -= ofld_need - nchan; + s->ofldqsets = (i / nchan) * nchan; /* round down */ + } + for (i = 0; i < want; ++i) + adap->msix_info[i].vec = entries[i].vector; + + return 0; } #undef EXTRA_VECS @@ -5801,11 +5860,6 @@ static int init_rss(struct adapter *adap) static void print_port_info(const struct net_device *dev) { - static const char *base[] = { - "R XFI", "R XAUI", "T SGMII", "T XFI", "T XAUI", "KX4", "CX4", - "KX", "KR", "R SFP+", "KR/KX", "KR/KX/KX4" - }; - char buf[80]; char *bufp = buf; const char *spd = ""; @@ -5823,9 +5877,11 @@ static void print_port_info(const struct net_device *dev) bufp += sprintf(bufp, "1000/"); if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G) bufp += sprintf(bufp, "10G/"); + if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_40G) + bufp += sprintf(bufp, "40G/"); if (bufp != buf) --bufp; - sprintf(bufp, "BASE-%s", base[pi->port_type]); + sprintf(bufp, "BASE-%s", t4_get_port_type_description(pi->port_type)); netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n", adap->params.vpd.id, @@ -5833,8 +5889,8 @@ static void print_port_info(const struct net_device *dev) is_offload(adap) ? "R" : "", adap->params.pci.width, spd, (adap->flags & USING_MSIX) ? " MSI-X" : (adap->flags & USING_MSI) ? " MSI" : ""); - netdev_info(dev, "S/N: %s, E/C: %s\n", - adap->params.vpd.sn, adap->params.vpd.ec); + netdev_info(dev, "S/N: %s, P/N: %s\n", + adap->params.vpd.sn, adap->params.vpd.pn); } static void enable_pcie_relaxed_ordering(struct pci_dev *dev) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h index 4dd0a82533e4..e274a047528f 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h @@ -253,6 +253,7 @@ struct cxgb4_lld_info { /* packet data */ bool enable_fw_ofld_conn; /* Enable connection through fw */ /* WR */ + bool ulptx_memwrite_dsgl; /* use of T5 DSGL allowed */ }; struct cxgb4_uld_info { diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index 47ffa64fcf19..ca95cf2954eb 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -93,6 +93,16 @@ */ #define TX_QCHECK_PERIOD (HZ / 2) +/* SGE Hung Ingress DMA Threshold Warning time (in Hz) and Warning Repeat Rate + * (in RX_QCHECK_PERIOD multiples). If we find one of the SGE Ingress DMA + * State Machines in the same state for this amount of time (in HZ) then we'll + * issue a warning about a potential hang. We'll repeat the warning as the + * SGE Ingress DMA Channel appears to be hung every N RX_QCHECK_PERIODs till + * the situation clears. If the situation clears, we'll note that as well. + */ +#define SGE_IDMA_WARN_THRESH (1 * HZ) +#define SGE_IDMA_WARN_REPEAT (20 * RX_QCHECK_PERIOD) + /* * Max number of Tx descriptors to be reclaimed by the Tx timer. */ @@ -373,7 +383,7 @@ static void free_tx_desc(struct adapter *adap, struct sge_txq *q, if (d->skb) { /* an SGL is present */ if (unmap) unmap_sgl(dev, d->skb, d->sgl, q); - kfree_skb(d->skb); + dev_consume_skb_any(d->skb); d->skb = NULL; } ++d; @@ -706,11 +716,17 @@ static inline unsigned int flits_to_desc(unsigned int n) * @skb: the packet * * Returns whether an Ethernet packet is small enough to fit as - * immediate data. + * immediate data. 
Return value corresponds to headroom required. */ static inline int is_eth_imm(const struct sk_buff *skb) { - return skb->len <= MAX_IMM_TX_PKT_LEN - sizeof(struct cpl_tx_pkt); + int hdrlen = skb_shinfo(skb)->gso_size ? + sizeof(struct cpl_tx_pkt_lso_core) : 0; + + hdrlen += sizeof(struct cpl_tx_pkt); + if (skb->len <= MAX_IMM_TX_PKT_LEN - hdrlen) + return hdrlen; + return 0; } /** @@ -723,9 +739,10 @@ static inline int is_eth_imm(const struct sk_buff *skb) static inline unsigned int calc_tx_flits(const struct sk_buff *skb) { unsigned int flits; + int hdrlen = is_eth_imm(skb); - if (is_eth_imm(skb)) - return DIV_ROUND_UP(skb->len + sizeof(struct cpl_tx_pkt), 8); + if (hdrlen) + return DIV_ROUND_UP(skb->len + hdrlen, sizeof(__be64)); flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 4; if (skb_shinfo(skb)->gso_size) @@ -843,9 +860,10 @@ static void cxgb_pio_copy(u64 __iomem *dst, u64 *src) static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n) { unsigned int *wr, index; + unsigned long flags; wmb(); /* write descriptors before telling HW */ - spin_lock(&q->db_lock); + spin_lock_irqsave(&q->db_lock, flags); if (!q->db_disabled) { if (is_t4(adap->params.chip)) { t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), @@ -861,9 +879,10 @@ static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n) writel(n, adap->bar2 + q->udb + 8); wmb(); } - } + } else + q->db_pidx_inc += n; q->db_pidx = q->pidx; - spin_unlock(&q->db_lock); + spin_unlock_irqrestore(&q->db_lock, flags); } /** @@ -971,6 +990,7 @@ static inline void txq_advance(struct sge_txq *q, unsigned int n) */ netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev) { + int len; u32 wr_mid; u64 cntrl, *end; int qidx, credits; @@ -982,13 +1002,14 @@ netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev) struct cpl_tx_pkt_core *cpl; const struct skb_shared_info *ssi; dma_addr_t addr[MAX_SKB_FRAGS + 1]; + bool immediate = false; /* * The chip min packet length is 10 octets but play safe and reject * anything shorter than an Ethernet header. */ if (unlikely(skb->len < ETH_HLEN)) { -out_free: dev_kfree_skb(skb); +out_free: dev_kfree_skb_any(skb); return NETDEV_TX_OK; } @@ -1011,7 +1032,10 @@ out_free: dev_kfree_skb(skb); return NETDEV_TX_BUSY; } - if (!is_eth_imm(skb) && + if (is_eth_imm(skb)) + immediate = true; + + if (!immediate && unlikely(map_skb(adap->pdev_dev, skb, addr) < 0)) { q->mapping_err++; goto out_free; @@ -1028,6 +1052,7 @@ out_free: dev_kfree_skb(skb); wr->r3 = cpu_to_be64(0); end = (u64 *)wr + flits; + len = immediate ? skb->len : 0; ssi = skb_shinfo(skb); if (ssi->gso_size) { struct cpl_tx_pkt_lso *lso = (void *)wr; @@ -1035,8 +1060,9 @@ out_free: dev_kfree_skb(skb); int l3hdr_len = skb_network_header_len(skb); int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN; + len += sizeof(*lso); wr->op_immdlen = htonl(FW_WR_OP(FW_ETH_TX_PKT_WR) | - FW_WR_IMMDLEN(sizeof(*lso))); + FW_WR_IMMDLEN(len)); lso->c.lso_ctrl = htonl(LSO_OPCODE(CPL_TX_PKT_LSO) | LSO_FIRST_SLICE | LSO_LAST_SLICE | LSO_IPV6(v6) | @@ -1054,9 +1080,7 @@ out_free: dev_kfree_skb(skb); q->tso++; q->tx_cso += ssi->gso_segs; } else { - int len; - - len = is_eth_imm(skb) ? 
skb->len + sizeof(*cpl) : sizeof(*cpl); + len += sizeof(*cpl); wr->op_immdlen = htonl(FW_WR_OP(FW_ETH_TX_PKT_WR) | FW_WR_IMMDLEN(len)); cpl = (void *)(wr + 1); @@ -1078,9 +1102,9 @@ out_free: dev_kfree_skb(skb); cpl->len = htons(skb->len); cpl->ctrl1 = cpu_to_be64(cntrl); - if (is_eth_imm(skb)) { + if (immediate) { inline_tx_skb(skb, &q->q, cpl + 1); - dev_kfree_skb(skb); + dev_consume_skb_any(skb); } else { int last_desc; @@ -1467,8 +1491,12 @@ static inline int ofld_send(struct adapter *adap, struct sk_buff *skb) { unsigned int idx = skb_txq(skb); - if (unlikely(is_ctrl_pkt(skb))) + if (unlikely(is_ctrl_pkt(skb))) { + /* Single ctrl queue is a requirement for LE workaround path */ + if (adap->tids.nsftids) + idx = 0; return ctrl_xmit(&adap->sge.ctrlq[idx], skb); + } return ofld_xmit(&adap->sge.ofldtxq[idx], skb); } @@ -1992,7 +2020,7 @@ irq_handler_t t4_intr_handler(struct adapter *adap) static void sge_rx_timer_cb(unsigned long data) { unsigned long m; - unsigned int i, cnt[2]; + unsigned int i, idma_same_state_cnt[2]; struct adapter *adap = (struct adapter *)data; struct sge *s = &adap->sge; @@ -2015,21 +2043,64 @@ static void sge_rx_timer_cb(unsigned long data) } t4_write_reg(adap, SGE_DEBUG_INDEX, 13); - cnt[0] = t4_read_reg(adap, SGE_DEBUG_DATA_HIGH); - cnt[1] = t4_read_reg(adap, SGE_DEBUG_DATA_LOW); - - for (i = 0; i < 2; i++) - if (cnt[i] >= s->starve_thres) { - if (s->idma_state[i] || cnt[i] == 0xffffffff) - continue; - s->idma_state[i] = 1; - t4_write_reg(adap, SGE_DEBUG_INDEX, 11); - m = t4_read_reg(adap, SGE_DEBUG_DATA_LOW) >> (i * 16); - dev_warn(adap->pdev_dev, - "SGE idma%u starvation detected for " - "queue %lu\n", i, m & 0xffff); - } else if (s->idma_state[i]) - s->idma_state[i] = 0; + idma_same_state_cnt[0] = t4_read_reg(adap, SGE_DEBUG_DATA_HIGH); + idma_same_state_cnt[1] = t4_read_reg(adap, SGE_DEBUG_DATA_LOW); + + for (i = 0; i < 2; i++) { + u32 debug0, debug11; + + /* If the Ingress DMA Same State Counter ("timer") is less + * than 1s, then we can reset our synthesized Stall Timer and + * continue. If we have previously emitted warnings about a + * potential stalled Ingress Queue, issue a note indicating + * that the Ingress Queue has resumed forward progress. + */ + if (idma_same_state_cnt[i] < s->idma_1s_thresh) { + if (s->idma_stalled[i] >= SGE_IDMA_WARN_THRESH) + CH_WARN(adap, "SGE idma%d, queue%u,resumed after %d sec\n", + i, s->idma_qid[i], + s->idma_stalled[i]/HZ); + s->idma_stalled[i] = 0; + continue; + } + + /* Synthesize an SGE Ingress DMA Same State Timer in the Hz + * domain. The first time we get here it'll be because we + * passed the 1s Threshold; each additional time it'll be + * because the RX Timer Callback is being fired on its regular + * schedule. + * + * If the stall is below our Potential Hung Ingress Queue + * Warning Threshold, continue. + */ + if (s->idma_stalled[i] == 0) + s->idma_stalled[i] = HZ; + else + s->idma_stalled[i] += RX_QCHECK_PERIOD; + + if (s->idma_stalled[i] < SGE_IDMA_WARN_THRESH) + continue; + + /* We'll issue a warning every SGE_IDMA_WARN_REPEAT Hz */ + if (((s->idma_stalled[i] - HZ) % SGE_IDMA_WARN_REPEAT) != 0) + continue; + + /* Read and save the SGE IDMA State and Queue ID information. + * We do this every time in case it changes across time ... 
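 *
 * (Worked example of the warning cadence above, assuming the in-tree
 * RX_QCHECK_PERIOD of HZ/2: on the callback where the hardware counter
 * first crosses idma_1s_thresh, idma_stalled[i] is seeded with HZ,
 * which already equals SGE_IDMA_WARN_THRESH, so the first warning goes
 * out immediately.  Every later callback adds RX_QCHECK_PERIOD, and the
 * (idma_stalled[i] - HZ) % SGE_IDMA_WARN_REPEAT test then passes once
 * every 20 callbacks -- a repeat warning roughly every 10 seconds until
 * the counter falls back under the 1s threshold.)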
+ */ + t4_write_reg(adap, SGE_DEBUG_INDEX, 0); + debug0 = t4_read_reg(adap, SGE_DEBUG_DATA_LOW); + s->idma_state[i] = (debug0 >> (i * 9)) & 0x3f; + + t4_write_reg(adap, SGE_DEBUG_INDEX, 11); + debug11 = t4_read_reg(adap, SGE_DEBUG_DATA_LOW); + s->idma_qid[i] = (debug11 >> (i * 16)) & 0xffff; + + CH_WARN(adap, "SGE idma%u, queue%u, maybe stuck state%u %dsecs (debug0=%#x, debug11=%#x)\n", + i, s->idma_qid[i], s->idma_state[i], + s->idma_stalled[i]/HZ, debug0, debug11); + t4_sge_decode_idma_state(adap, s->idma_state[i]); + } mod_timer(&s->rx_timer, jiffies + RX_QCHECK_PERIOD); } @@ -2580,11 +2651,19 @@ static int t4_sge_init_soft(struct adapter *adap) fl_small_mtu = READ_FL_BUF(RX_SMALL_MTU_BUF); fl_large_mtu = READ_FL_BUF(RX_LARGE_MTU_BUF); + /* We only bother using the Large Page logic if the Large Page Buffer + * is larger than our Page Size Buffer. + */ + if (fl_large_pg <= fl_small_pg) + fl_large_pg = 0; + #undef READ_FL_BUF + /* The Page Size Buffer must be exactly equal to our Page Size and the + * Large Page Size Buffer should be 0 (per above) or a power of 2. + */ if (fl_small_pg != PAGE_SIZE || - (fl_large_pg != 0 && (fl_large_pg < fl_small_pg || - (fl_large_pg & (fl_large_pg-1)) != 0))) { + (fl_large_pg & (fl_large_pg-1)) != 0) { dev_err(adap->pdev_dev, "bad SGE FL page buffer sizes [%d, %d]\n", fl_small_pg, fl_large_pg); return -EINVAL; @@ -2699,8 +2778,8 @@ static int t4_sge_init_hard(struct adapter *adap) int t4_sge_init(struct adapter *adap) { struct sge *s = &adap->sge; - u32 sge_control; - int ret; + u32 sge_control, sge_conm_ctrl; + int ret, egress_threshold; /* * Ingress Padding Boundary and Egress Status Page Size are set up by @@ -2725,15 +2804,24 @@ int t4_sge_init(struct adapter *adap) * SGE's Egress Congestion Threshold. If it isn't, then we can get * stuck waiting for new packets while the SGE is waiting for us to * give it more Free List entries. (Note that the SGE's Egress - * Congestion Threshold is in units of 2 Free List pointers.) + * Congestion Threshold is in units of 2 Free List pointers.) For T4, + * there was only a single field to control this. For T5 there's the + * original field which now only applies to Unpacked Mode Free List + * buffers and a new field which only applies to Packed Mode Free List + * buffers. 
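 *
 * (Numeric example: the hardware expresses this threshold in units of
 * 2 Free List pointers, so a field value of 16 -- read from whichever
 * of the two fields applies to the chip at hand -- gives
 * fl_starve_thres = 2 * 16 + 1 = 33 Free List entries held in reserve.)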
*/ - s->fl_starve_thres - = EGRTHRESHOLD_GET(t4_read_reg(adap, SGE_CONM_CTRL))*2 + 1; + sge_conm_ctrl = t4_read_reg(adap, SGE_CONM_CTRL); + if (is_t4(adap->params.chip)) + egress_threshold = EGRTHRESHOLD_GET(sge_conm_ctrl); + else + egress_threshold = EGRTHRESHOLDPACKING_GET(sge_conm_ctrl); + s->fl_starve_thres = 2*egress_threshold + 1; setup_timer(&s->rx_timer, sge_rx_timer_cb, (unsigned long)adap); setup_timer(&s->tx_timer, sge_tx_timer_cb, (unsigned long)adap); - s->starve_thres = core_ticks_per_usec(adap) * 1000000; /* 1 s */ - s->idma_state[0] = s->idma_state[1] = 0; + s->idma_1s_thresh = core_ticks_per_usec(adap) * 1000000; /* 1 s */ + s->idma_stalled[0] = 0; + s->idma_stalled[1] = 0; spin_lock_init(&s->intrq_lock); return 0; diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 2c109343d570..fb2fe65903c2 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -573,7 +573,7 @@ int get_vpd_params(struct adapter *adapter, struct vpd_params *p) { u32 cclk_param, cclk_val; int i, ret, addr; - int ec, sn; + int ec, sn, pn; u8 *vpd, csum; unsigned int vpdr_len, kw_offset, id_len; @@ -638,6 +638,7 @@ int get_vpd_params(struct adapter *adapter, struct vpd_params *p) FIND_VPD_KW(ec, "EC"); FIND_VPD_KW(sn, "SN"); + FIND_VPD_KW(pn, "PN"); #undef FIND_VPD_KW memcpy(p->id, vpd + PCI_VPD_LRDT_TAG_SIZE, id_len); @@ -647,6 +648,8 @@ int get_vpd_params(struct adapter *adapter, struct vpd_params *p) i = pci_vpd_info_field_size(vpd + sn - PCI_VPD_INFO_FLD_HDR_SIZE); memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN)); strim(p->sn); + memcpy(p->pn, vpd + pn, min(i, PN_LEN)); + strim(p->pn); /* * Ask firmware for the Core Clock since it knows how to translate the @@ -1155,7 +1158,8 @@ out: } #define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\ - FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_ANEG) + FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G | \ + FW_PORT_CAP_ANEG) /** * t4_link_start - apply link configuration to MAC/PHY @@ -2247,6 +2251,36 @@ static unsigned int get_mps_bg_map(struct adapter *adap, int idx) } /** + * t4_get_port_type_description - return Port Type string description + * @port_type: firmware Port Type enumeration + */ +const char *t4_get_port_type_description(enum fw_port_type port_type) +{ + static const char *const port_type_description[] = { + "R XFI", + "R XAUI", + "T SGMII", + "T XFI", + "T XAUI", + "KX4", + "CX4", + "KX", + "KR", + "R SFP+", + "KR/KX", + "KR/KX/KX4", + "R QSFP_10G", + "", + "R QSFP", + "R BP40_BA", + }; + + if (port_type < ARRAY_SIZE(port_type_description)) + return port_type_description[port_type]; + return "UNKNOWN"; +} + +/** * t4_get_port_stats - collect port statistics * @adap: the adapter * @idx: the port index @@ -2563,6 +2597,112 @@ int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr, } /** + * t4_sge_decode_idma_state - decode the idma state + * @adap: the adapter + * @state: the state idma is stuck in + */ +void t4_sge_decode_idma_state(struct adapter *adapter, int state) +{ + static const char * const t4_decode[] = { + "IDMA_IDLE", + "IDMA_PUSH_MORE_CPL_FIFO", + "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO", + "Not used", + "IDMA_PHYSADDR_SEND_PCIEHDR", + "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST", + "IDMA_PHYSADDR_SEND_PAYLOAD", + "IDMA_SEND_FIFO_TO_IMSG", + "IDMA_FL_REQ_DATA_FL_PREP", + "IDMA_FL_REQ_DATA_FL", + "IDMA_FL_DROP", + "IDMA_FL_H_REQ_HEADER_FL", + "IDMA_FL_H_SEND_PCIEHDR", + "IDMA_FL_H_PUSH_CPL_FIFO", + "IDMA_FL_H_SEND_CPL", + 
"IDMA_FL_H_SEND_IP_HDR_FIRST", + "IDMA_FL_H_SEND_IP_HDR", + "IDMA_FL_H_REQ_NEXT_HEADER_FL", + "IDMA_FL_H_SEND_NEXT_PCIEHDR", + "IDMA_FL_H_SEND_IP_HDR_PADDING", + "IDMA_FL_D_SEND_PCIEHDR", + "IDMA_FL_D_SEND_CPL_AND_IP_HDR", + "IDMA_FL_D_REQ_NEXT_DATA_FL", + "IDMA_FL_SEND_PCIEHDR", + "IDMA_FL_PUSH_CPL_FIFO", + "IDMA_FL_SEND_CPL", + "IDMA_FL_SEND_PAYLOAD_FIRST", + "IDMA_FL_SEND_PAYLOAD", + "IDMA_FL_REQ_NEXT_DATA_FL", + "IDMA_FL_SEND_NEXT_PCIEHDR", + "IDMA_FL_SEND_PADDING", + "IDMA_FL_SEND_COMPLETION_TO_IMSG", + "IDMA_FL_SEND_FIFO_TO_IMSG", + "IDMA_FL_REQ_DATAFL_DONE", + "IDMA_FL_REQ_HEADERFL_DONE", + }; + static const char * const t5_decode[] = { + "IDMA_IDLE", + "IDMA_ALMOST_IDLE", + "IDMA_PUSH_MORE_CPL_FIFO", + "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO", + "IDMA_SGEFLRFLUSH_SEND_PCIEHDR", + "IDMA_PHYSADDR_SEND_PCIEHDR", + "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST", + "IDMA_PHYSADDR_SEND_PAYLOAD", + "IDMA_SEND_FIFO_TO_IMSG", + "IDMA_FL_REQ_DATA_FL", + "IDMA_FL_DROP", + "IDMA_FL_DROP_SEND_INC", + "IDMA_FL_H_REQ_HEADER_FL", + "IDMA_FL_H_SEND_PCIEHDR", + "IDMA_FL_H_PUSH_CPL_FIFO", + "IDMA_FL_H_SEND_CPL", + "IDMA_FL_H_SEND_IP_HDR_FIRST", + "IDMA_FL_H_SEND_IP_HDR", + "IDMA_FL_H_REQ_NEXT_HEADER_FL", + "IDMA_FL_H_SEND_NEXT_PCIEHDR", + "IDMA_FL_H_SEND_IP_HDR_PADDING", + "IDMA_FL_D_SEND_PCIEHDR", + "IDMA_FL_D_SEND_CPL_AND_IP_HDR", + "IDMA_FL_D_REQ_NEXT_DATA_FL", + "IDMA_FL_SEND_PCIEHDR", + "IDMA_FL_PUSH_CPL_FIFO", + "IDMA_FL_SEND_CPL", + "IDMA_FL_SEND_PAYLOAD_FIRST", + "IDMA_FL_SEND_PAYLOAD", + "IDMA_FL_REQ_NEXT_DATA_FL", + "IDMA_FL_SEND_NEXT_PCIEHDR", + "IDMA_FL_SEND_PADDING", + "IDMA_FL_SEND_COMPLETION_TO_IMSG", + }; + static const u32 sge_regs[] = { + SGE_DEBUG_DATA_LOW_INDEX_2, + SGE_DEBUG_DATA_LOW_INDEX_3, + SGE_DEBUG_DATA_HIGH_INDEX_10, + }; + const char **sge_idma_decode; + int sge_idma_decode_nstates; + int i; + + if (is_t4(adapter->params.chip)) { + sge_idma_decode = (const char **)t4_decode; + sge_idma_decode_nstates = ARRAY_SIZE(t4_decode); + } else { + sge_idma_decode = (const char **)t5_decode; + sge_idma_decode_nstates = ARRAY_SIZE(t5_decode); + } + + if (state < sge_idma_decode_nstates) + CH_WARN(adapter, "idma state %s\n", sge_idma_decode[state]); + else + CH_WARN(adapter, "idma state %d unknown\n", state); + + for (i = 0; i < ARRAY_SIZE(sge_regs); i++) + CH_WARN(adapter, "SGE register %#x value %#x\n", + sge_regs[i], t4_read_reg(adapter, sge_regs[i])); +} + +/** * t4_fw_hello - establish communication with FW * @adap: the adapter * @mbox: mailbox to use for the FW command @@ -3533,11 +3673,13 @@ int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl) if (stat & FW_PORT_CMD_TXPAUSE) fc |= PAUSE_TX; if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M)) - speed = SPEED_100; + speed = 100; else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G)) - speed = SPEED_1000; + speed = 1000; else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G)) - speed = SPEED_10000; + speed = 10000; + else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_40G)) + speed = 40000; if (link_ok != lc->link_ok || speed != lc->speed || fc != lc->fc) { /* something changed */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h index cd6874b571ee..f2738c710789 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h @@ -116,6 +116,7 @@ enum CPL_error { CPL_ERR_KEEPALIVE_TIMEDOUT = 34, CPL_ERR_RTX_NEG_ADVICE = 35, CPL_ERR_PERSIST_NEG_ADVICE = 36, + CPL_ERR_KEEPALV_NEG_ADVICE = 37, CPL_ERR_ABORT_FAILED = 42, CPL_ERR_IWARP_FLM = 
50, }; diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h index 4082522d8140..225ad8a5722d 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h @@ -230,6 +230,12 @@ #define EGRTHRESHOLD(x) ((x) << EGRTHRESHOLDshift) #define EGRTHRESHOLD_GET(x) (((x) & EGRTHRESHOLD_MASK) >> EGRTHRESHOLDshift) +#define EGRTHRESHOLDPACKING_MASK 0x3fU +#define EGRTHRESHOLDPACKING_SHIFT 14 +#define EGRTHRESHOLDPACKING(x) ((x) << EGRTHRESHOLDPACKING_SHIFT) +#define EGRTHRESHOLDPACKING_GET(x) (((x) >> EGRTHRESHOLDPACKING_SHIFT) & \ + EGRTHRESHOLDPACKING_MASK) + #define SGE_DBFIFO_STATUS 0x10a4 #define HP_INT_THRESH_SHIFT 28 #define HP_INT_THRESH_MASK 0xfU @@ -278,6 +284,9 @@ #define SGE_DEBUG_INDEX 0x10cc #define SGE_DEBUG_DATA_HIGH 0x10d0 #define SGE_DEBUG_DATA_LOW 0x10d4 +#define SGE_DEBUG_DATA_LOW_INDEX_2 0x12c8 +#define SGE_DEBUG_DATA_LOW_INDEX_3 0x12cc +#define SGE_DEBUG_DATA_HIGH_INDEX_10 0x12a8 #define SGE_INGRESS_QUEUES_PER_PAGE_PF 0x10f4 #define S_HP_INT_THRESH 28 diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h index 74fea74ce0aa..9cc973fbcf26 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h @@ -932,6 +932,7 @@ enum fw_params_param_dev { FW_PARAMS_PARAM_DEV_FWREV = 0x0B, FW_PARAMS_PARAM_DEV_TPREV = 0x0C, FW_PARAMS_PARAM_DEV_CF = 0x0D, + FW_PARAMS_PARAM_DEV_ULPTX_MEMWRITE_DSGL = 0x17, }; /* @@ -1742,6 +1743,9 @@ enum fw_port_type { FW_PORT_TYPE_SFP, FW_PORT_TYPE_BP_AP, FW_PORT_TYPE_BP4_AP, + FW_PORT_TYPE_QSFP_10G, + FW_PORT_TYPE_QSFP, + FW_PORT_TYPE_BP40_BA, FW_PORT_TYPE_NONE = FW_PORT_CMD_PTYPE_MASK }; diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c index 0899c0983594..52859288de7b 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c @@ -2444,7 +2444,7 @@ static void reduce_ethqs(struct adapter *adapter, int n) */ static int enable_msix(struct adapter *adapter) { - int i, err, want, need; + int i, want, need, nqsets; struct msix_entry entries[MSIX_ENTRIES]; struct sge *s = &adapter->sge; @@ -2460,26 +2460,23 @@ static int enable_msix(struct adapter *adapter) */ want = s->max_ethqsets + MSIX_EXTRAS; need = adapter->params.nports + MSIX_EXTRAS; - while ((err = pci_enable_msix(adapter->pdev, entries, want)) >= need) - want = err; - if (err == 0) { - int nqsets = want - MSIX_EXTRAS; - if (nqsets < s->max_ethqsets) { - dev_warn(adapter->pdev_dev, "only enough MSI-X vectors" - " for %d Queue Sets\n", nqsets); - s->max_ethqsets = nqsets; - if (nqsets < s->ethqsets) - reduce_ethqs(adapter, nqsets); - } - for (i = 0; i < want; ++i) - adapter->msix_info[i].vec = entries[i].vector; - } else if (err > 0) { - pci_disable_msix(adapter->pdev); - dev_info(adapter->pdev_dev, "only %d MSI-X vectors left," - " not using MSI-X\n", err); + want = pci_enable_msix_range(adapter->pdev, entries, need, want); + if (want < 0) + return want; + + nqsets = want - MSIX_EXTRAS; + if (nqsets < s->max_ethqsets) { + dev_warn(adapter->pdev_dev, "only enough MSI-X vectors" + " for %d Queue Sets\n", nqsets); + s->max_ethqsets = nqsets; + if (nqsets < s->ethqsets) + reduce_ethqs(adapter, nqsets); } - return err; + for (i = 0; i < want; ++i) + adapter->msix_info[i].vec = entries[i].vector; + + return 0; } static const struct net_device_ops cxgb4vf_netdev_ops = { @@ -2947,6 
+2944,14 @@ static DEFINE_PCI_DEVICE_TABLE(cxgb4vf_pci_tbl) = { CH_DEVICE(0x5811, 0), /* T520-lp-cr */ CH_DEVICE(0x5812, 0), /* T560-cr */ CH_DEVICE(0x5813, 0), /* T580-cr */ + CH_DEVICE(0x5814, 0), /* T580-so-cr */ + CH_DEVICE(0x5815, 0), /* T502-bt */ + CH_DEVICE(0x5880, 0), + CH_DEVICE(0x5881, 0), + CH_DEVICE(0x5882, 0), + CH_DEVICE(0x5883, 0), + CH_DEVICE(0x5884, 0), + CH_DEVICE(0x5885, 0), { 0, } }; diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c index 0a89963c48ce..9cfa4b4bb089 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c @@ -401,7 +401,7 @@ static void free_tx_desc(struct adapter *adapter, struct sge_txq *tq, if (sdesc->skb) { if (need_unmap) unmap_sgl(dev, sdesc->skb, sdesc->sgl, tq); - kfree_skb(sdesc->skb); + dev_consume_skb_any(sdesc->skb); sdesc->skb = NULL; } @@ -1275,7 +1275,7 @@ int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev) * need it any longer. */ inline_tx_skb(skb, &txq->q, cpl + 1); - dev_kfree_skb(skb); + dev_consume_skb_any(skb); } else { /* * Write the skb's Scatter/Gather list into the TX Packet CPL @@ -1354,7 +1354,7 @@ out_free: * An error of some sort happened. Free the TX skb and tell the * OS that we've "dealt" with the packet ... */ - dev_kfree_skb(skb); + dev_kfree_skb_any(skb); return NETDEV_TX_OK; } diff --git a/drivers/net/ethernet/cirrus/cs89x0.c b/drivers/net/ethernet/cirrus/cs89x0.c index 19f642a45f40..fe84fbabc0d4 100644 --- a/drivers/net/ethernet/cirrus/cs89x0.c +++ b/drivers/net/ethernet/cirrus/cs89x0.c @@ -1174,7 +1174,7 @@ static netdev_tx_t net_send_packet(struct sk_buff *skb, struct net_device *dev) writewords(lp, TX_FRAME_PORT, skb->data, (skb->len + 1) >> 1); spin_unlock_irqrestore(&lp->lock, flags); dev->stats.tx_bytes += skb->len; - dev_kfree_skb(skb); + dev_consume_skb_any(skb); /* We DO NOT call netif_wake_queue() here. * We also DO NOT call netif_start_queue(). 
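The skb-free conversions repeated across the drivers in this series follow one rule: a frame successfully handed to hardware is "consumed", while a frame rejected on an error path is dropped, and only drops should feed the kernel's drop monitor. The _any() variants are used because transmit paths may run in either hard-IRQ or non-IRQ context. A minimal sketch of the convention in a hypothetical driver (foo_xmit is illustrative, not from this series):

	#include <linux/netdevice.h>
	#include <linux/skbuff.h>
	#include <linux/if_ether.h>

	static netdev_tx_t foo_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		/* Error path: the frame is being dropped, so use the
		 * kfree flavour, which notifies the drop monitor.
		 */
		if (unlikely(skb->len < ETH_HLEN)) {
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}

		/* ... copy the frame into the hardware TX FIFO here ... */

		/* Success path: the frame reached hardware, so use the
		 * consume flavour, which is not counted as a drop.
		 */
		dev_consume_skb_any(skb);
		return NETDEV_TX_OK;
	}

Drivers that DMA-map the skb instead of copying it keep the skb until the TX-completion handler, which is why the dev_consume_skb_any() calls in the cxgb3/cxgb4/cxgb4vf hunks sit in free_tx_desc() rather than in the xmit routine itself.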
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index b740bfce72ef..2945718ce806 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c @@ -521,7 +521,7 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb, unsigned int txq_map; if (skb->len <= 0) { - dev_kfree_skb(skb); + dev_kfree_skb_any(skb); return NETDEV_TX_OK; } @@ -536,7 +536,7 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb, if (skb_shinfo(skb)->gso_size == 0 && skb_shinfo(skb)->nr_frags + 1 > ENIC_NON_TSO_MAX_DESC && skb_linearize(skb)) { - dev_kfree_skb(skb); + dev_kfree_skb_any(skb); return NETDEV_TX_OK; } @@ -1086,14 +1086,15 @@ static int enic_poll(struct napi_struct *napi, int budget) unsigned int intr = enic_legacy_io_intr(); unsigned int rq_work_to_do = budget; unsigned int wq_work_to_do = -1; /* no limit */ - unsigned int work_done, rq_work_done, wq_work_done; + unsigned int work_done, rq_work_done = 0, wq_work_done; int err; /* Service RQ (first) and WQ */ - rq_work_done = vnic_cq_service(&enic->cq[cq_rq], - rq_work_to_do, enic_rq_service, NULL); + if (budget > 0) + rq_work_done = vnic_cq_service(&enic->cq[cq_rq], + rq_work_to_do, enic_rq_service, NULL); wq_work_done = vnic_cq_service(&enic->cq[cq_wq], wq_work_to_do, enic_wq_service, NULL); @@ -1141,14 +1142,15 @@ static int enic_poll_msix(struct napi_struct *napi, int budget) unsigned int cq = enic_cq_rq(enic, rq); unsigned int intr = enic_msix_rq_intr(enic, rq); unsigned int work_to_do = budget; - unsigned int work_done; + unsigned int work_done = 0; int err; /* Service RQ */ - work_done = vnic_cq_service(&enic->cq[cq], - work_to_do, enic_rq_service, NULL); + if (budget > 0) + work_done = vnic_cq_service(&enic->cq[cq], + work_to_do, enic_rq_service, NULL); /* Return intr event credits for this polling * cycle. 
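 *
 * (The two "if (budget > 0)" guards above follow the NAPI rule that a
 * poll routine may be invoked with a zero budget -- netpoll does this
 * -- in which case the driver may reap transmit completions but must
 * not do any receive work.  That is why rq_work_done and work_done now
 * default to 0 and the RX service call is skipped entirely when no
 * budget was granted.)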
An intr event is the completion of a @@ -1796,7 +1798,8 @@ static int enic_set_intr_mode(struct enic *enic) enic->cq_count >= n + m && enic->intr_count >= n + m + 2) { - if (!pci_enable_msix(enic->pdev, enic->msix_entry, n + m + 2)) { + if (pci_enable_msix_range(enic->pdev, enic->msix_entry, + n + m + 2, n + m + 2) > 0) { enic->rq_count = n; enic->wq_count = m; @@ -1815,7 +1818,8 @@ static int enic_set_intr_mode(struct enic *enic) enic->wq_count >= m && enic->cq_count >= 1 + m && enic->intr_count >= 1 + m + 2) { - if (!pci_enable_msix(enic->pdev, enic->msix_entry, 1 + m + 2)) { + if (pci_enable_msix_range(enic->pdev, enic->msix_entry, + 1 + m + 2, 1 + m + 2) > 0) { enic->rq_count = 1; enic->wq_count = m; diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c index a1a2b4028a5c..8c4b93be333b 100644 --- a/drivers/net/ethernet/davicom/dm9000.c +++ b/drivers/net/ethernet/davicom/dm9000.c @@ -1033,7 +1033,7 @@ dm9000_start_xmit(struct sk_buff *skb, struct net_device *dev) spin_unlock_irqrestore(&db->lock, flags); /* free this SKB */ - dev_kfree_skb(skb); + dev_consume_skb_any(skb); return NETDEV_TX_OK; } diff --git a/drivers/net/ethernet/dec/tulip/dmfe.c b/drivers/net/ethernet/dec/tulip/dmfe.c index 5ad9e3e3c0b8..53f0c618045c 100644 --- a/drivers/net/ethernet/dec/tulip/dmfe.c +++ b/drivers/net/ethernet/dec/tulip/dmfe.c @@ -696,7 +696,7 @@ static netdev_tx_t dmfe_start_xmit(struct sk_buff *skb, /* Too large packet check */ if (skb->len > MAX_PACKET_SIZE) { pr_err("big packet = %d\n", (u16)skb->len); - dev_kfree_skb(skb); + dev_kfree_skb_any(skb); return NETDEV_TX_OK; } @@ -743,7 +743,7 @@ static netdev_tx_t dmfe_start_xmit(struct sk_buff *skb, dw32(DCR7, db->cr7_data); /* free this SKB */ - dev_kfree_skb(skb); + dev_consume_skb_any(skb); return NETDEV_TX_OK; } diff --git a/drivers/net/ethernet/dec/tulip/uli526x.c b/drivers/net/ethernet/dec/tulip/uli526x.c index aa4ee385091f..aa801a6af7b9 100644 --- a/drivers/net/ethernet/dec/tulip/uli526x.c +++ b/drivers/net/ethernet/dec/tulip/uli526x.c @@ -607,7 +607,7 @@ static netdev_tx_t uli526x_start_xmit(struct sk_buff *skb, /* Too large packet check */ if (skb->len > MAX_PACKET_SIZE) { netdev_err(dev, "big packet = %d\n", (u16)skb->len); - dev_kfree_skb(skb); + dev_kfree_skb_any(skb); return NETDEV_TX_OK; } @@ -648,7 +648,7 @@ static netdev_tx_t uli526x_start_xmit(struct sk_buff *skb, uw32(DCR7, db->cr7_data); /* free this SKB */ - dev_kfree_skb(skb); + dev_consume_skb_any(skb); return NETDEV_TX_OK; } diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c index 113cd799a131..d9e5ca0d48c1 100644 --- a/drivers/net/ethernet/dlink/sundance.c +++ b/drivers/net/ethernet/dlink/sundance.c @@ -1137,7 +1137,7 @@ start_tx (struct sk_buff *skb, struct net_device *dev) return NETDEV_TX_OK; drop_frame: - dev_kfree_skb(skb); + dev_kfree_skb_any(skb); np->tx_skbuff[entry] = NULL; dev->stats.tx_dropped++; return NETDEV_TX_OK; diff --git a/drivers/net/ethernet/dnet.c b/drivers/net/ethernet/dnet.c index 8a79a32a5674..e9b0faba3078 100644 --- a/drivers/net/ethernet/dnet.c +++ b/drivers/net/ethernet/dnet.c @@ -170,11 +170,6 @@ static int dnet_mdio_write(struct mii_bus *bus, int mii_id, int regnum, return 0; } -static int dnet_mdio_reset(struct mii_bus *bus) -{ - return 0; -} - static void dnet_handle_link_change(struct net_device *dev) { struct dnet *bp = netdev_priv(dev); @@ -322,7 +317,6 @@ static int dnet_mii_init(struct dnet *bp) bp->mii_bus->name = "dnet_mii_bus"; bp->mii_bus->read = &dnet_mdio_read; 
bp->mii_bus->write = &dnet_mdio_write; - bp->mii_bus->reset = &dnet_mdio_reset; snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", bp->pdev->name, bp->pdev->id); diff --git a/drivers/net/ethernet/emulex/benet/Kconfig b/drivers/net/ethernet/emulex/benet/Kconfig index 231129dd1764..ea94a8eb6b35 100644 --- a/drivers/net/ethernet/emulex/benet/Kconfig +++ b/drivers/net/ethernet/emulex/benet/Kconfig @@ -4,3 +4,11 @@ config BE2NET ---help--- This driver implements the NIC functionality for ServerEngines' 10Gbps network adapter - BladeEngine. + +config BE2NET_VXLAN + bool "VXLAN offload support on be2net driver" + default y + depends on BE2NET && VXLAN && !(BE2NET=y && VXLAN=m) + ---help--- + Say Y here if you want to enable VXLAN offload support on + be2net driver. diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h index 05529e273050..8ccaa2520dc3 100644 --- a/drivers/net/ethernet/emulex/benet/be.h +++ b/drivers/net/ethernet/emulex/benet/be.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2005 - 2013 Emulex + * Copyright (C) 2005 - 2014 Emulex * All rights reserved. * * This program is free software; you can redistribute it and/or @@ -34,7 +34,7 @@ #include "be_hw.h" #include "be_roce.h" -#define DRV_VER "10.0.600.0u" +#define DRV_VER "10.2u" #define DRV_NAME "be2net" #define BE_NAME "Emulex BladeEngine2" #define BE3_NAME "Emulex BladeEngine3" @@ -88,7 +88,6 @@ static inline char *nic_name(struct pci_dev *pdev) #define BE_MIN_MTU 256 #define BE_NUM_VLANS_SUPPORTED 64 -#define BE_UMC_NUM_VLANS_SUPPORTED 15 #define BE_MAX_EQD 128u #define BE_MAX_TX_FRAG_COUNT 30 @@ -262,9 +261,10 @@ struct be_tx_obj { /* Struct to remember the pages posted for rx frags */ struct be_rx_page_info { struct page *page; + /* set to page-addr for last frag of the page & frag-addr otherwise */ DEFINE_DMA_UNMAP_ADDR(bus); u16 page_offset; - bool last_page_user; + bool last_frag; /* last frag of the page */ }; struct be_rx_stats { @@ -293,9 +293,10 @@ struct be_rx_compl_info { u8 ip_csum; u8 l4_csum; u8 ipv6; - u8 vtm; + u8 qnq; u8 pkt_type; u8 ip_frag; + u8 tunneled; }; struct be_rx_obj { @@ -359,6 +360,7 @@ struct be_vf_cfg { int pmac_id; u16 vlan_tag; u32 tx_rate; + u32 plink_tracking; }; enum vf_state { @@ -370,10 +372,11 @@ enum vf_state { #define BE_FLAGS_WORKER_SCHEDULED (1 << 3) #define BE_FLAGS_VLAN_PROMISC (1 << 4) #define BE_FLAGS_NAPI_ENABLED (1 << 9) -#define BE_UC_PMAC_COUNT 30 -#define BE_VF_UC_PMAC_COUNT 2 #define BE_FLAGS_QNQ_ASYNC_EVT_RCVD (1 << 11) +#define BE_FLAGS_VXLAN_OFFLOADS (1 << 12) +#define BE_UC_PMAC_COUNT 30 +#define BE_VF_UC_PMAC_COUNT 2 /* Ethtool set_dump flags */ #define LANCER_INITIATE_FW_DUMP 0x1 @@ -467,6 +470,7 @@ struct be_adapter { u32 port_num; bool promiscuous; + u8 mc_type; u32 function_mode; u32 function_caps; u32 rx_fc; /* Rx flow control */ @@ -492,6 +496,7 @@ struct be_adapter { u32 sli_family; u8 hba_port_num; u16 pvid; + __be16 vxlan_port; struct phy_info phy; u8 wol_cap; bool wol_en; @@ -536,6 +541,14 @@ static inline u16 be_max_qs(struct be_adapter *adapter) return min_t(u16, num, num_online_cpus()); } +/* Is BE in pvid_tagging mode */ +#define be_pvid_tagging_enabled(adapter) (adapter->pvid) + +/* Is BE in QNQ multi-channel mode */ +#define be_is_qnq_mode(adapter) (adapter->mc_type == FLEX10 || \ + adapter->mc_type == vNIC1 || \ + adapter->mc_type == UFP) + #define lancer_chip(adapter) (adapter->pdev->device == OC_DEVICE_ID3 || \ adapter->pdev->device == OC_DEVICE_ID4) diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c 
b/drivers/net/ethernet/emulex/benet/be_cmds.c index 48076a6370c3..d1ec15af0d24 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2005 - 2013 Emulex + * Copyright (C) 2005 - 2014 Emulex * All rights reserved. * * This program is free software; you can redistribute it and/or @@ -202,8 +202,12 @@ static void be_async_link_state_process(struct be_adapter *adapter, /* When link status changes, link speed must be re-queried from FW */ adapter->phy.link_speed = -1; - /* Ignore physical link event */ - if (lancer_chip(adapter) && + /* On BEx the FW does not send a separate link status + * notification for physical and logical link. + * On other chips just process the logical link + * status notification + */ + if (!BEx_chip(adapter) && !(evt->port_link_status & LOGICAL_LINK_STATUS_MASK)) return; @@ -211,7 +215,8 @@ static void be_async_link_state_process(struct be_adapter *adapter, * it may not be received in some cases. */ if (adapter->flags & BE_FLAGS_LINK_STATUS_INIT) - be_link_status_update(adapter, evt->port_link_status); + be_link_status_update(adapter, + evt->port_link_status & LINK_STATUS_MASK); } /* Grp5 CoS Priority evt */ @@ -239,10 +244,12 @@ static void be_async_grp5_qos_speed_process(struct be_adapter *adapter, static void be_async_grp5_pvid_state_process(struct be_adapter *adapter, struct be_async_event_grp5_pvid_state *evt) { - if (evt->enabled) + if (evt->enabled) { adapter->pvid = le16_to_cpu(evt->tag) & VLAN_VID_MASK; - else + dev_info(&adapter->pdev->dev, "LPVID: %d\n", adapter->pvid); + } else { adapter->pvid = 0; + } } static void be_async_grp5_evt_process(struct be_adapter *adapter, @@ -3296,6 +3303,21 @@ static struct be_pcie_res_desc *be_get_pcie_desc(u8 devfn, u8 *buf, return NULL; } +static struct be_port_res_desc *be_get_port_desc(u8 *buf, u32 desc_count) +{ + struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf; + int i; + + for (i = 0; i < desc_count; i++) { + if (hdr->desc_type == PORT_RESOURCE_DESC_TYPE_V1) + return (struct be_port_res_desc *)hdr; + + hdr->desc_len = hdr->desc_len ? 
: RESOURCE_DESC_SIZE_V0; + hdr = (void *)hdr + hdr->desc_len; + } + return NULL; +} + static void be_copy_nic_desc(struct be_resources *res, struct be_nic_res_desc *desc) { @@ -3439,6 +3461,7 @@ int be_cmd_get_profile_config(struct be_adapter *adapter, { struct be_cmd_resp_get_profile_config *resp; struct be_pcie_res_desc *pcie; + struct be_port_res_desc *port; struct be_nic_res_desc *nic; struct be_queue_info *mccq = &adapter->mcc_obj.q; struct be_dma_mem cmd; @@ -3466,6 +3489,10 @@ int be_cmd_get_profile_config(struct be_adapter *adapter, if (pcie) res->max_vfs = le16_to_cpu(pcie->num_vfs); + port = be_get_port_desc(resp->func_param, desc_count); + if (port) + adapter->mc_type = port->mc_type; + nic = be_get_nic_desc(resp->func_param, desc_count); if (nic) be_copy_nic_desc(res, nic); @@ -3476,14 +3503,11 @@ err: return status; } -/* Currently only Lancer uses this command and it supports version 0 only - * Uses sync mcc - */ -int be_cmd_set_profile_config(struct be_adapter *adapter, u32 bps, - u8 domain) +int be_cmd_set_profile_config(struct be_adapter *adapter, void *desc, + int size, u8 version, u8 domain) { - struct be_mcc_wrb *wrb; struct be_cmd_req_set_profile_config *req; + struct be_mcc_wrb *wrb; int status; spin_lock_bh(&adapter->mcc_lock); @@ -3495,44 +3519,116 @@ int be_cmd_set_profile_config(struct be_adapter *adapter, u32 bps, } req = embedded_payload(wrb); - be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, OPCODE_COMMON_SET_PROFILE_CONFIG, sizeof(*req), wrb, NULL); + req->hdr.version = version; req->hdr.domain = domain; req->desc_count = cpu_to_le32(1); - req->nic_desc.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V0; - req->nic_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V0; - req->nic_desc.flags = (1 << QUN) | (1 << IMM) | (1 << NOSV); - req->nic_desc.pf_num = adapter->pf_number; - req->nic_desc.vf_num = domain; - - /* Mark fields invalid */ - req->nic_desc.unicast_mac_count = 0xFFFF; - req->nic_desc.mcc_count = 0xFFFF; - req->nic_desc.vlan_count = 0xFFFF; - req->nic_desc.mcast_mac_count = 0xFFFF; - req->nic_desc.txq_count = 0xFFFF; - req->nic_desc.rq_count = 0xFFFF; - req->nic_desc.rssq_count = 0xFFFF; - req->nic_desc.lro_count = 0xFFFF; - req->nic_desc.cq_count = 0xFFFF; - req->nic_desc.toe_conn_count = 0xFFFF; - req->nic_desc.eq_count = 0xFFFF; - req->nic_desc.link_param = 0xFF; - req->nic_desc.bw_min = 0xFFFFFFFF; - req->nic_desc.acpi_params = 0xFF; - req->nic_desc.wol_param = 0x0F; - - /* Change BW */ - req->nic_desc.bw_min = cpu_to_le32(bps); - req->nic_desc.bw_max = cpu_to_le32(bps); + memcpy(req->desc, desc, size); + status = be_mcc_notify_wait(adapter); err: spin_unlock_bh(&adapter->mcc_lock); return status; } +/* Mark all fields invalid */ +void be_reset_nic_desc(struct be_nic_res_desc *nic) +{ + memset(nic, 0, sizeof(*nic)); + nic->unicast_mac_count = 0xFFFF; + nic->mcc_count = 0xFFFF; + nic->vlan_count = 0xFFFF; + nic->mcast_mac_count = 0xFFFF; + nic->txq_count = 0xFFFF; + nic->rq_count = 0xFFFF; + nic->rssq_count = 0xFFFF; + nic->lro_count = 0xFFFF; + nic->cq_count = 0xFFFF; + nic->toe_conn_count = 0xFFFF; + nic->eq_count = 0xFFFF; + nic->link_param = 0xFF; + nic->acpi_params = 0xFF; + nic->wol_param = 0x0F; + nic->bw_min = 0xFFFFFFFF; + nic->bw_max = 0xFFFFFFFF; +} + +int be_cmd_config_qos(struct be_adapter *adapter, u32 bps, u8 domain) +{ + if (lancer_chip(adapter)) { + struct be_nic_res_desc nic_desc; + + be_reset_nic_desc(&nic_desc); + nic_desc.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V0; + nic_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V0; + nic_desc.flags = (1 << 
QUN_SHIFT) | (1 << IMM_SHIFT) | + (1 << NOSV_SHIFT); + nic_desc.pf_num = adapter->pf_number; + nic_desc.vf_num = domain; + nic_desc.bw_max = cpu_to_le32(bps); + + return be_cmd_set_profile_config(adapter, &nic_desc, + RESOURCE_DESC_SIZE_V0, + 0, domain); + } else { + return be_cmd_set_qos(adapter, bps, domain); + } +} + +int be_cmd_manage_iface(struct be_adapter *adapter, u32 iface, u8 op) +{ + struct be_mcc_wrb *wrb; + struct be_cmd_req_manage_iface_filters *req; + int status; + + if (iface == 0xFFFFFFFF) + return -1; + + spin_lock_bh(&adapter->mcc_lock); + + wrb = wrb_from_mccq(adapter); + if (!wrb) { + status = -EBUSY; + goto err; + } + req = embedded_payload(wrb); + + be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, + OPCODE_COMMON_MANAGE_IFACE_FILTERS, sizeof(*req), + wrb, NULL); + req->op = op; + req->target_iface_id = cpu_to_le32(iface); + + status = be_mcc_notify_wait(adapter); +err: + spin_unlock_bh(&adapter->mcc_lock); + return status; +} + +int be_cmd_set_vxlan_port(struct be_adapter *adapter, __be16 port) +{ + struct be_port_res_desc port_desc; + + memset(&port_desc, 0, sizeof(port_desc)); + port_desc.hdr.desc_type = PORT_RESOURCE_DESC_TYPE_V1; + port_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V1; + port_desc.flags = (1 << IMM_SHIFT) | (1 << NOSV_SHIFT); + port_desc.link_num = adapter->hba_port_num; + if (port) { + port_desc.nv_flags = NV_TYPE_VXLAN | (1 << SOCVID_SHIFT) | + (1 << RCVID_SHIFT); + port_desc.nv_port = swab16(port); + } else { + port_desc.nv_flags = NV_TYPE_DISABLED; + port_desc.nv_port = 0; + } + + return be_cmd_set_profile_config(adapter, &port_desc, + RESOURCE_DESC_SIZE_V1, 1, 0); +} + int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg, int vf_num) { @@ -3723,6 +3819,45 @@ err: return status; } +int be_cmd_set_logical_link_config(struct be_adapter *adapter, + int link_state, u8 domain) +{ + struct be_mcc_wrb *wrb; + struct be_cmd_req_set_ll_link *req; + int status; + + if (BEx_chip(adapter) || lancer_chip(adapter)) + return 0; + + spin_lock_bh(&adapter->mcc_lock); + + wrb = wrb_from_mccq(adapter); + if (!wrb) { + status = -EBUSY; + goto err; + } + + req = embedded_payload(wrb); + + be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, + OPCODE_COMMON_SET_LOGICAL_LINK_CONFIG, + sizeof(*req), wrb, NULL); + + req->hdr.version = 1; + req->hdr.domain = domain; + + if (link_state == IFLA_VF_LINK_STATE_ENABLE) + req->link_config |= 1; + + if (link_state == IFLA_VF_LINK_STATE_AUTO) + req->link_config |= 1 << PLINK_TRACK_SHIFT; + + status = be_mcc_notify_wait(adapter); +err: + spin_unlock_bh(&adapter->mcc_lock); + return status; +} + int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload, int wrb_payload_size, u16 *cmd_status, u16 *ext_status) { diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h index fc4e076dc202..b60e4d53c1c9 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.h +++ b/drivers/net/ethernet/emulex/benet/be_cmds.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2005 - 2013 Emulex + * Copyright (C) 2005 - 2014 Emulex * All rights reserved. 
* * This program is free software; you can redistribute it and/or @@ -203,6 +203,7 @@ struct be_mcc_mailbox { #define OPCODE_COMMON_GET_BEACON_STATE 70 #define OPCODE_COMMON_READ_TRANSRECV_DATA 73 #define OPCODE_COMMON_GET_PORT_NAME 77 +#define OPCODE_COMMON_SET_LOGICAL_LINK_CONFIG 80 #define OPCODE_COMMON_SET_INTERRUPT_ENABLE 89 #define OPCODE_COMMON_SET_FN_PRIVILEGES 100 #define OPCODE_COMMON_GET_PHY_DETAILS 102 @@ -221,6 +222,7 @@ struct be_mcc_mailbox { #define OPCODE_COMMON_GET_FN_PRIVILEGES 170 #define OPCODE_COMMON_READ_OBJECT 171 #define OPCODE_COMMON_WRITE_OBJECT 172 +#define OPCODE_COMMON_MANAGE_IFACE_FILTERS 193 #define OPCODE_COMMON_GET_IFACE_LIST 194 #define OPCODE_COMMON_ENABLE_DISABLE_VF 196 @@ -1098,14 +1100,6 @@ struct be_cmd_resp_query_fw_cfg { u32 function_caps; }; -/* Is BE in a multi-channel mode */ -static inline bool be_is_mc(struct be_adapter *adapter) -{ - return adapter->function_mode & FLEX10_MODE || - adapter->function_mode & VNIC_MODE || - adapter->function_mode & UMC_ENABLED; -} - /******************** RSS Config ****************************************/ /* RSS type Input parameters used to compute RX hash * RSS_ENABLE_IPV4 SRC IPv4, DST IPv4 @@ -1828,20 +1822,36 @@ struct be_cmd_req_set_ext_fat_caps { #define NIC_RESOURCE_DESC_TYPE_V0 0x41 #define PCIE_RESOURCE_DESC_TYPE_V1 0x50 #define NIC_RESOURCE_DESC_TYPE_V1 0x51 +#define PORT_RESOURCE_DESC_TYPE_V1 0x55 #define MAX_RESOURCE_DESC 264 -/* QOS unit number */ -#define QUN 4 -/* Immediate */ -#define IMM 6 -/* No save */ -#define NOSV 7 +#define IMM_SHIFT 6 /* Immediate */ +#define NOSV_SHIFT 7 /* No save */ struct be_res_desc_hdr { u8 desc_type; u8 desc_len; } __packed; +struct be_port_res_desc { + struct be_res_desc_hdr hdr; + u8 rsvd0; + u8 flags; + u8 link_num; + u8 mc_type; + u16 rsvd1; + +#define NV_TYPE_MASK 0x3 /* bits 0-1 */ +#define NV_TYPE_DISABLED 1 +#define NV_TYPE_VXLAN 3 +#define SOCVID_SHIFT 2 /* Strip outer vlan */ +#define RCVID_SHIFT 4 /* Report vlan */ + u8 nv_flags; + u8 rsvd2; + __le16 nv_port; /* vxlan/gre port */ + u32 rsvd3[19]; +} __packed; + struct be_pcie_res_desc { struct be_res_desc_hdr hdr; u8 rsvd0; @@ -1862,6 +1872,8 @@ struct be_pcie_res_desc { struct be_nic_res_desc { struct be_res_desc_hdr hdr; u8 rsvd1; + +#define QUN_SHIFT 4 /* QoS is in absolute units */ u8 flags; u8 vf_num; u8 rsvd2; @@ -1891,6 +1903,23 @@ struct be_nic_res_desc { u32 rsvd8[7]; } __packed; +/************ Multi-Channel type ***********/ +enum mc_type { + MC_NONE = 0x01, + UMC = 0x02, + FLEX10 = 0x03, + vNIC1 = 0x04, + nPAR = 0x05, + UFP = 0x06, + vNIC2 = 0x07 +}; + +/* Is BE in a multi-channel mode */ +static inline bool be_is_mc(struct be_adapter *adapter) +{ + return adapter->mc_type > MC_NONE; +} + struct be_cmd_req_get_func_config { struct be_cmd_req_hdr hdr; }; @@ -1919,7 +1948,7 @@ struct be_cmd_req_set_profile_config { struct be_cmd_req_hdr hdr; u32 rsvd; u32 desc_count; - struct be_nic_res_desc nic_desc; + u8 desc[RESOURCE_DESC_SIZE_V1]; }; struct be_cmd_resp_set_profile_config { @@ -1971,6 +2000,33 @@ struct be_cmd_resp_get_iface_list { struct be_if_desc if_desc; }; +/*************** Set logical link ********************/ +#define PLINK_TRACK_SHIFT 8 +struct be_cmd_req_set_ll_link { + struct be_cmd_req_hdr hdr; + u32 link_config; /* Bit 0: UP_DOWN, Bit 9: PLINK */ +}; + +/************** Manage IFACE Filters *******************/ +#define OP_CONVERT_NORMAL_TO_TUNNEL 0 +#define OP_CONVERT_TUNNEL_TO_NORMAL 1 + +struct be_cmd_req_manage_iface_filters { + struct be_cmd_req_hdr hdr; + u8 op; + u8 rsvd0; 
+ u8 flags; + u8 rsvd1; + u32 tunnel_iface_id; + u32 target_iface_id; + u8 mac[6]; + u16 vlan_tag; + u32 tenant_id; + u32 filter_id; + u32 cap_flags; + u32 cap_control_flags; +} __packed; + int be_pci_fnum_get(struct be_adapter *adapter); int be_fw_wait_ready(struct be_adapter *adapter); int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr, @@ -2045,7 +2101,7 @@ int be_cmd_get_seeprom_data(struct be_adapter *adapter, int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num, u8 loopback_type, u8 enable); int be_cmd_get_phy_info(struct be_adapter *adapter); -int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain); +int be_cmd_config_qos(struct be_adapter *adapter, u32 bps, u8 domain); void be_detect_error(struct be_adapter *adapter); int be_cmd_get_die_temperature(struct be_adapter *adapter); int be_cmd_get_cntl_attributes(struct be_adapter *adapter); @@ -2086,9 +2142,14 @@ int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res); int be_cmd_get_profile_config(struct be_adapter *adapter, struct be_resources *res, u8 domain); -int be_cmd_set_profile_config(struct be_adapter *adapter, u32 bps, u8 domain); +int be_cmd_set_profile_config(struct be_adapter *adapter, void *desc, + int size, u8 version, u8 domain); int be_cmd_get_active_profile(struct be_adapter *adapter, u16 *profile); int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg, int vf_num); int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain); int be_cmd_intr_set(struct be_adapter *adapter, bool intr_enable); +int be_cmd_set_logical_link_config(struct be_adapter *adapter, + int link_state, u8 domain); +int be_cmd_set_vxlan_port(struct be_adapter *adapter, __be16 port); +int be_cmd_manage_iface(struct be_adapter *adapter, u32 iface, u8 op); diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c index 05be0070f55f..15ba96cba65d 100644 --- a/drivers/net/ethernet/emulex/benet/be_ethtool.c +++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2005 - 2013 Emulex + * Copyright (C) 2005 - 2014 Emulex * All rights reserved. * * This program is free software; you can redistribute it and/or @@ -357,10 +357,10 @@ be_get_ethtool_stats(struct net_device *netdev, struct be_rx_stats *stats = rx_stats(rxo); do { - start = u64_stats_fetch_begin_bh(&stats->sync); + start = u64_stats_fetch_begin_irq(&stats->sync); data[base] = stats->rx_bytes; data[base + 1] = stats->rx_pkts; - } while (u64_stats_fetch_retry_bh(&stats->sync, start)); + } while (u64_stats_fetch_retry_irq(&stats->sync, start)); for (i = 2; i < ETHTOOL_RXSTATS_NUM; i++) { p = (u8 *)stats + et_rx_stats[i].offset; @@ -373,19 +373,19 @@ be_get_ethtool_stats(struct net_device *netdev, struct be_tx_stats *stats = tx_stats(txo); do { - start = u64_stats_fetch_begin_bh(&stats->sync_compl); + start = u64_stats_fetch_begin_irq(&stats->sync_compl); data[base] = stats->tx_compl; - } while (u64_stats_fetch_retry_bh(&stats->sync_compl, start)); + } while (u64_stats_fetch_retry_irq(&stats->sync_compl, start)); do { - start = u64_stats_fetch_begin_bh(&stats->sync); + start = u64_stats_fetch_begin_irq(&stats->sync); for (i = 1; i < ETHTOOL_TXSTATS_NUM; i++) { p = (u8 *)stats + et_tx_stats[i].offset; data[base + i] = (et_tx_stats[i].size == sizeof(u64)) ? 
*(u64 *)p : *(u32 *)p; } - } while (u64_stats_fetch_retry_bh(&stats->sync, start)); + } while (u64_stats_fetch_retry_irq(&stats->sync, start)); base += ETHTOOL_TXSTATS_NUM; } } @@ -802,16 +802,18 @@ be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data) if (test->flags & ETH_TEST_FL_OFFLINE) { if (be_loopback_test(adapter, BE_MAC_LOOPBACK, - &data[0]) != 0) { + &data[0]) != 0) test->flags |= ETH_TEST_FL_FAILED; - } + if (be_loopback_test(adapter, BE_PHY_LOOPBACK, - &data[1]) != 0) { - test->flags |= ETH_TEST_FL_FAILED; - } - if (be_loopback_test(adapter, BE_ONE_PORT_EXT_LOOPBACK, - &data[2]) != 0) { + &data[1]) != 0) test->flags |= ETH_TEST_FL_FAILED; + + if (test->flags & ETH_TEST_FL_EXTERNAL_LB) { + if (be_loopback_test(adapter, BE_ONE_PORT_EXT_LOOPBACK, + &data[2]) != 0) + test->flags |= ETH_TEST_FL_FAILED; + test->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE; } } diff --git a/drivers/net/ethernet/emulex/benet/be_hw.h b/drivers/net/ethernet/emulex/benet/be_hw.h index dc88782185f2..3bd198550edb 100644 --- a/drivers/net/ethernet/emulex/benet/be_hw.h +++ b/drivers/net/ethernet/emulex/benet/be_hw.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2005 - 2013 Emulex + * Copyright (C) 2005 - 2014 Emulex * All rights reserved. * * This program is free software; you can redistribute it and/or @@ -368,7 +368,7 @@ struct amap_eth_rx_compl_v0 { u8 numfrags[3]; /* dword 1 */ u8 rss_flush; /* dword 2 */ u8 cast_enc[2]; /* dword 2 */ - u8 vtm; /* dword 2 */ + u8 qnq; /* dword 2 */ u8 rss_bank; /* dword 2 */ u8 rsvd1[23]; /* dword 2 */ u8 lro_pkt; /* dword 2 */ @@ -401,13 +401,14 @@ struct amap_eth_rx_compl_v1 { u8 numfrags[3]; /* dword 1 */ u8 rss_flush; /* dword 2 */ u8 cast_enc[2]; /* dword 2 */ - u8 vtm; /* dword 2 */ + u8 qnq; /* dword 2 */ u8 rss_bank; /* dword 2 */ u8 port[2]; /* dword 2 */ u8 vntagp; /* dword 2 */ u8 header_len[8]; /* dword 2 */ u8 header_split[2]; /* dword 2 */ - u8 rsvd1[13]; /* dword 2 */ + u8 rsvd1[12]; /* dword 2 */ + u8 tunneled; u8 valid; /* dword 2 */ u8 rsshash[32]; /* dword 3 */ } __packed; diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 36c80612e21a..3e6df47b6973 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2005 - 2013 Emulex + * Copyright (C) 2005 - 2014 Emulex * All rights reserved. 
* * This program is free software; you can redistribute it and/or @@ -23,6 +23,7 @@ #include <linux/aer.h> #include <linux/if_bridge.h> #include <net/busy_poll.h> +#include <net/vxlan.h> MODULE_VERSION(DRV_VER); MODULE_DEVICE_TABLE(pci, be_dev_ids); @@ -591,10 +592,10 @@ static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev, for_all_rx_queues(adapter, rxo, i) { const struct be_rx_stats *rx_stats = rx_stats(rxo); do { - start = u64_stats_fetch_begin_bh(&rx_stats->sync); + start = u64_stats_fetch_begin_irq(&rx_stats->sync); pkts = rx_stats(rxo)->rx_pkts; bytes = rx_stats(rxo)->rx_bytes; - } while (u64_stats_fetch_retry_bh(&rx_stats->sync, start)); + } while (u64_stats_fetch_retry_irq(&rx_stats->sync, start)); stats->rx_packets += pkts; stats->rx_bytes += bytes; stats->multicast += rx_stats(rxo)->rx_mcast_pkts; @@ -605,10 +606,10 @@ static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev, for_all_tx_queues(adapter, txo, i) { const struct be_tx_stats *tx_stats = tx_stats(txo); do { - start = u64_stats_fetch_begin_bh(&tx_stats->sync); + start = u64_stats_fetch_begin_irq(&tx_stats->sync); pkts = tx_stats(txo)->tx_pkts; bytes = tx_stats(txo)->tx_bytes; - } while (u64_stats_fetch_retry_bh(&tx_stats->sync, start)); + } while (u64_stats_fetch_retry_irq(&tx_stats->sync, start)); stats->tx_packets += pkts; stats->tx_bytes += bytes; } @@ -652,7 +653,7 @@ void be_link_status_update(struct be_adapter *adapter, u8 link_status) adapter->flags |= BE_FLAGS_LINK_STATUS_INIT; } - if ((link_status & LINK_STATUS_MASK) == LINK_UP) + if (link_status) netif_carrier_on(netdev); else netif_carrier_off(netdev); @@ -718,10 +719,23 @@ static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter, return vlan_tag; } +/* Used only for IP tunnel packets */ +static u16 skb_inner_ip_proto(struct sk_buff *skb) +{ + return (inner_ip_hdr(skb)->version == 4) ? + inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr; +} + +static u16 skb_ip_proto(struct sk_buff *skb) +{ + return (ip_hdr(skb)->version == 4) ? 
+ ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr; +} + static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr, struct sk_buff *skb, u32 wrb_cnt, u32 len, bool skip_hw_vlan) { - u16 vlan_tag; + u16 vlan_tag, proto; memset(hdr, 0, sizeof(*hdr)); @@ -734,9 +748,15 @@ static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr, if (skb_is_gso_v6(skb) && !lancer_chip(adapter)) AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1); } else if (skb->ip_summed == CHECKSUM_PARTIAL) { - if (is_tcp_pkt(skb)) + if (skb->encapsulation) { + AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1); + proto = skb_inner_ip_proto(skb); + } else { + proto = skb_ip_proto(skb); + } + if (proto == IPPROTO_TCP) AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1); - else if (is_udp_pkt(skb)) + else if (proto == IPPROTO_UDP) AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1); } @@ -935,9 +955,9 @@ static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter, } /* If vlan tag is already inlined in the packet, skip HW VLAN - * tagging in UMC mode + * tagging in pvid-tagging mode */ - if ((adapter->function_mode & UMC_ENABLED) && + if (be_pvid_tagging_enabled(adapter) && veh->h_vlan_proto == htons(ETH_P_8021Q)) *skip_hw_vlan = true; @@ -1138,7 +1158,10 @@ static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid) /* Packets with VID 0 are always received by Lancer by default */ if (lancer_chip(adapter) && vid == 0) - goto ret; + return status; + + if (adapter->vlan_tag[vid]) + return status; adapter->vlan_tag[vid] = 1; adapter->vlans_added++; @@ -1148,7 +1171,7 @@ static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid) adapter->vlans_added--; adapter->vlan_tag[vid] = 0; } -ret: + return status; } @@ -1288,6 +1311,7 @@ static int be_get_vf_config(struct net_device *netdev, int vf, vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK; vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT; memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN); + vi->linkstate = adapter->vf_cfg[vf].plink_tracking; return 0; } @@ -1342,11 +1366,7 @@ static int be_set_vf_tx_rate(struct net_device *netdev, return -EINVAL; } - if (lancer_chip(adapter)) - status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1); - else - status = be_cmd_set_qos(adapter, rate / 10, vf + 1); - + status = be_cmd_config_qos(adapter, rate / 10, vf + 1); if (status) dev_err(&adapter->pdev->dev, "tx rate %d on VF %d failed\n", rate, vf); @@ -1354,6 +1374,24 @@ static int be_set_vf_tx_rate(struct net_device *netdev, adapter->vf_cfg[vf].tx_rate = rate; return status; } +static int be_set_vf_link_state(struct net_device *netdev, int vf, + int link_state) +{ + struct be_adapter *adapter = netdev_priv(netdev); + int status; + + if (!sriov_enabled(adapter)) + return -EPERM; + + if (vf >= adapter->num_vfs) + return -EINVAL; + + status = be_cmd_set_logical_link_config(adapter, link_state, vf+1); + if (!status) + adapter->vf_cfg[vf].plink_tracking = link_state; + + return status; +} static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts, ulong now) @@ -1386,15 +1424,15 @@ static void be_eqd_update(struct be_adapter *adapter) rxo = &adapter->rx_obj[eqo->idx]; do { - start = u64_stats_fetch_begin_bh(&rxo->stats.sync); + start = u64_stats_fetch_begin_irq(&rxo->stats.sync); rx_pkts = rxo->stats.rx_pkts; - } while (u64_stats_fetch_retry_bh(&rxo->stats.sync, start)); + } while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start)); txo = &adapter->tx_obj[eqo->idx]; do { - start = 
u64_stats_fetch_begin_bh(&txo->stats.sync); + start = u64_stats_fetch_begin_irq(&txo->stats.sync); tx_pkts = txo->stats.tx_reqs; - } while (u64_stats_fetch_retry_bh(&txo->stats.sync, start)); + } while (u64_stats_fetch_retry_irq(&txo->stats.sync, start)); /* Skip, if wrapped around or first calculation */ @@ -1449,9 +1487,10 @@ static void be_rx_stats_update(struct be_rx_obj *rxo, static inline bool csum_passed(struct be_rx_compl_info *rxcp) { /* L4 checksum is not reliable for non TCP/UDP packets. - * Also ignore ipcksm for ipv6 pkts */ + * Also ignore ipcksm for ipv6 pkts + */ return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum && - (rxcp->ip_csum || rxcp->ipv6); + (rxcp->ip_csum || rxcp->ipv6) && !rxcp->err; } static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo) @@ -1464,11 +1503,15 @@ static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo) rx_page_info = &rxo->page_info_tbl[frag_idx]; BUG_ON(!rx_page_info->page); - if (rx_page_info->last_page_user) { + if (rx_page_info->last_frag) { dma_unmap_page(&adapter->pdev->dev, dma_unmap_addr(rx_page_info, bus), adapter->big_page_size, DMA_FROM_DEVICE); - rx_page_info->last_page_user = false; + rx_page_info->last_frag = false; + } else { + dma_sync_single_for_cpu(&adapter->pdev->dev, + dma_unmap_addr(rx_page_info, bus), + rx_frag_size, DMA_FROM_DEVICE); } queue_tail_inc(rxq); @@ -1590,6 +1633,8 @@ static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi, skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]); if (netdev->features & NETIF_F_RXHASH) skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3); + + skb->encapsulation = rxcp->tunneled; skb_mark_napi_id(skb, napi); if (rxcp->vlanf) @@ -1646,6 +1691,8 @@ static void be_rx_compl_process_gro(struct be_rx_obj *rxo, skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]); if (adapter->netdev->features & NETIF_F_RXHASH) skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3); + + skb->encapsulation = rxcp->tunneled; skb_mark_napi_id(skb, napi); if (rxcp->vlanf) @@ -1676,12 +1723,14 @@ static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl, rxcp->rss_hash = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl); if (rxcp->vlanf) { - rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm, + rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, qnq, compl); rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag, compl); } rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl); + rxcp->tunneled = + AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tunneled, compl); } static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl, @@ -1706,7 +1755,7 @@ static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl, rxcp->rss_hash = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl); if (rxcp->vlanf) { - rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm, + rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, qnq, compl); rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag, compl); @@ -1739,9 +1788,11 @@ static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo) rxcp->l4_csum = 0; if (rxcp->vlanf) { - /* vlanf could be wrongly set in some cards. 
- * ignore if vtm is not set */ - if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm) + /* In QNQ modes, if qnq bit is not set, then the packet was + * tagged only with the transparent outer vlan-tag and must + * not be treated as a vlan packet by host + */ + if (be_is_qnq_mode(adapter) && !rxcp->qnq) rxcp->vlanf = 0; if (!lancer_chip(adapter)) @@ -1800,17 +1851,16 @@ static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp) rx_stats(rxo)->rx_post_fail++; break; } - page_info->page_offset = 0; + page_offset = 0; } else { get_page(pagep); - page_info->page_offset = page_offset + rx_frag_size; + page_offset += rx_frag_size; } - page_offset = page_info->page_offset; + page_info->page_offset = page_offset; page_info->page = pagep; - dma_unmap_addr_set(page_info, bus, page_dmaaddr); - frag_dmaaddr = page_dmaaddr + page_info->page_offset; rxd = queue_head_node(rxq); + frag_dmaaddr = page_dmaaddr + page_info->page_offset; rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF); rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr)); @@ -1818,15 +1868,24 @@ static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp) if ((page_offset + rx_frag_size + rx_frag_size) > adapter->big_page_size) { pagep = NULL; - page_info->last_page_user = true; + page_info->last_frag = true; + dma_unmap_addr_set(page_info, bus, page_dmaaddr); + } else { + dma_unmap_addr_set(page_info, bus, frag_dmaaddr); } prev_page_info = page_info; queue_head_inc(rxq); page_info = &rxo->page_info_tbl[rxq->head]; } - if (pagep) - prev_page_info->last_page_user = true; + + /* Mark the last frag of a page when we break out of the above loop + * with no more slots available in the RXQ + */ + if (pagep) { + prev_page_info->last_frag = true; + dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr); + } if (posted) { atomic_add(posted, &rxq->used); @@ -1883,7 +1942,7 @@ static u16 be_tx_compl_process(struct be_adapter *adapter, queue_tail_inc(txq); } while (cur_index != last_index); - kfree_skb(sent_skb); + dev_kfree_skb_any(sent_skb); return num_wrbs; } @@ -2439,6 +2498,9 @@ void be_detect_error(struct be_adapter *adapter) u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0; u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0; u32 i; + bool error_detected = false; + struct device *dev = &adapter->pdev->dev; + struct net_device *netdev = adapter->netdev; if (be_hw_error(adapter)) return; @@ -2450,6 +2512,21 @@ void be_detect_error(struct be_adapter *adapter) SLIPORT_ERROR1_OFFSET); sliport_err2 = ioread32(adapter->db + SLIPORT_ERROR2_OFFSET); + adapter->hw_error = true; + /* Do not log error messages if its a FW reset */ + if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 && + sliport_err2 == SLIPORT_ERROR_FW_RESET2) { + dev_info(dev, "Firmware update in progress\n"); + } else { + error_detected = true; + dev_err(dev, "Error detected in the card\n"); + dev_err(dev, "ERR: sliport status 0x%x\n", + sliport_status); + dev_err(dev, "ERR: sliport error1 0x%x\n", + sliport_err1); + dev_err(dev, "ERR: sliport error2 0x%x\n", + sliport_err2); + } } } else { pci_read_config_dword(adapter->pdev, @@ -2463,51 +2540,33 @@ void be_detect_error(struct be_adapter *adapter) ue_lo = (ue_lo & ~ue_lo_mask); ue_hi = (ue_hi & ~ue_hi_mask); - } - - /* On certain platforms BE hardware can indicate spurious UEs. - * Allow the h/w to stop working completely in case of a real UE. - * Hence not setting the hw_error for UE detection. 
- */ - if (sliport_status & SLIPORT_STATUS_ERR_MASK) { - adapter->hw_error = true; - /* Do not log error messages if its a FW reset */ - if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 && - sliport_err2 == SLIPORT_ERROR_FW_RESET2) { - dev_info(&adapter->pdev->dev, - "Firmware update in progress\n"); - return; - } else { - dev_err(&adapter->pdev->dev, - "Error detected in the card\n"); - } - } - - if (sliport_status & SLIPORT_STATUS_ERR_MASK) { - dev_err(&adapter->pdev->dev, - "ERR: sliport status 0x%x\n", sliport_status); - dev_err(&adapter->pdev->dev, - "ERR: sliport error1 0x%x\n", sliport_err1); - dev_err(&adapter->pdev->dev, - "ERR: sliport error2 0x%x\n", sliport_err2); - } - if (ue_lo) { - for (i = 0; ue_lo; ue_lo >>= 1, i++) { - if (ue_lo & 1) - dev_err(&adapter->pdev->dev, - "UE: %s bit set\n", ue_status_low_desc[i]); - } - } + /* On certain platforms BE hardware can indicate spurious UEs. + * Allow HW to stop working completely in case of a real UE. + * Hence not setting the hw_error for UE detection. + */ - if (ue_hi) { - for (i = 0; ue_hi; ue_hi >>= 1, i++) { - if (ue_hi & 1) - dev_err(&adapter->pdev->dev, - "UE: %s bit set\n", ue_status_hi_desc[i]); + if (ue_lo || ue_hi) { + error_detected = true; + dev_err(dev, + "Unrecoverable Error detected in the adapter"); + dev_err(dev, "Please reboot server to recover"); + if (skyhawk_chip(adapter)) + adapter->hw_error = true; + for (i = 0; ue_lo; ue_lo >>= 1, i++) { + if (ue_lo & 1) + dev_err(dev, "UE: %s bit set\n", + ue_status_low_desc[i]); + } + for (i = 0; ue_hi; ue_hi >>= 1, i++) { + if (ue_hi & 1) + dev_err(dev, "UE: %s bit set\n", + ue_status_hi_desc[i]); + } } } - + if (error_detected) + netif_carrier_off(netdev); } static void be_msix_disable(struct be_adapter *adapter) @@ -2521,7 +2580,7 @@ static void be_msix_disable(struct be_adapter *adapter) static int be_msix_enable(struct be_adapter *adapter) { - int i, status, num_vec; + int i, num_vec; struct device *dev = &adapter->pdev->dev; /* If RoCE is supported, program the max number of NIC vectors that @@ -2537,24 +2596,11 @@ static int be_msix_enable(struct be_adapter *adapter) for (i = 0; i < num_vec; i++) adapter->msix_entries[i].entry = i; - status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec); - if (status == 0) { - goto done; - } else if (status >= MIN_MSIX_VECTORS) { - num_vec = status; - status = pci_enable_msix(adapter->pdev, adapter->msix_entries, - num_vec); - if (!status) - goto done; - } - - dev_warn(dev, "MSIx enable failed\n"); + num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries, + MIN_MSIX_VECTORS, num_vec); + if (num_vec < 0) + goto fail; - /* INTx is not supported in VFs, so fail probe if enable_msix fails */ - if (!be_physfn(adapter)) - return status; - return 0; -done: if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) { adapter->num_msix_roce_vec = num_vec / 2; dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n", @@ -2566,6 +2612,14 @@ done: dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n", adapter->num_msix_vec); return 0; + +fail: + dev_warn(dev, "MSIx enable failed\n"); + + /* INTx is not supported in VFs, so fail probe if enable_msix fails */ + if (!be_physfn(adapter)) + return num_vec; + return 0; } static inline int be_msix_vec_get(struct be_adapter *adapter, @@ -2807,6 +2861,12 @@ static int be_open(struct net_device *netdev) netif_tx_start_all_queues(netdev); be_roce_dev_open(adapter); + +#ifdef CONFIG_BE2NET_VXLAN + if (skyhawk_chip(adapter)) + vxlan_get_rx_port(netdev); +#endif + return 0; err: 
be_close(adapter->netdev); @@ -2962,6 +3022,21 @@ static void be_mac_clear(struct be_adapter *adapter) } } +#ifdef CONFIG_BE2NET_VXLAN +static void be_disable_vxlan_offloads(struct be_adapter *adapter) +{ + if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) + be_cmd_manage_iface(adapter, adapter->if_handle, + OP_CONVERT_TUNNEL_TO_NORMAL); + + if (adapter->vxlan_port) + be_cmd_set_vxlan_port(adapter, 0); + + adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS; + adapter->vxlan_port = 0; +} +#endif + static int be_clear(struct be_adapter *adapter) { be_cancel_worker(adapter); @@ -2969,6 +3044,9 @@ static int be_clear(struct be_adapter *adapter) if (sriov_enabled(adapter)) be_vf_clear(adapter); +#ifdef CONFIG_BE2NET_VXLAN + be_disable_vxlan_offloads(adapter); +#endif /* delete the primary mac along with the uc-mac list */ be_mac_clear(adapter); @@ -3093,15 +3171,19 @@ static int be_vf_setup(struct be_adapter *adapter) * Allow full available bandwidth */ if (BE3_chip(adapter) && !old_vfs) - be_cmd_set_qos(adapter, 1000, vf+1); + be_cmd_config_qos(adapter, 1000, vf + 1); status = be_cmd_link_status_query(adapter, &lnk_speed, NULL, vf + 1); if (!status) vf_cfg->tx_rate = lnk_speed; - if (!old_vfs) + if (!old_vfs) { be_cmd_enable_vf(adapter, vf + 1); + be_cmd_set_logical_link_config(adapter, + IFLA_VF_LINK_STATE_AUTO, + vf+1); + } } if (!old_vfs) { @@ -3119,19 +3201,38 @@ err: return status; } +/* Converting function_mode bits on BE3 to SH mc_type enums */ + +static u8 be_convert_mc_type(u32 function_mode) +{ + if (function_mode & VNIC_MODE && function_mode & FLEX10_MODE) + return vNIC1; + else if (function_mode & FLEX10_MODE) + return FLEX10; + else if (function_mode & VNIC_MODE) + return vNIC2; + else if (function_mode & UMC_ENABLED) + return UMC; + else + return MC_NONE; +} + /* On BE2/BE3 FW does not suggest the supported limits */ static void BEx_get_resources(struct be_adapter *adapter, struct be_resources *res) { struct pci_dev *pdev = adapter->pdev; bool use_sriov = false; - int max_vfs; - - max_vfs = pci_sriov_get_totalvfs(pdev); - - if (BE3_chip(adapter) && sriov_want(adapter)) { - res->max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0; - use_sriov = res->max_vfs; + int max_vfs = 0; + + if (be_physfn(adapter) && BE3_chip(adapter)) { + be_cmd_get_profile_config(adapter, res, 0); + /* Some old versions of BE3 FW don't report max_vfs value */ + if (res->max_vfs == 0) { + max_vfs = pci_sriov_get_totalvfs(pdev); + res->max_vfs = max_vfs > 0 ? 
min(MAX_VFS, max_vfs) : 0; + } + use_sriov = res->max_vfs && sriov_want(adapter); } if (be_physfn(adapter)) @@ -3139,17 +3240,32 @@ static void BEx_get_resources(struct be_adapter *adapter, else res->max_uc_mac = BE_VF_UC_PMAC_COUNT; - if (adapter->function_mode & FLEX10_MODE) - res->max_vlans = BE_NUM_VLANS_SUPPORTED/8; - else if (adapter->function_mode & UMC_ENABLED) - res->max_vlans = BE_UMC_NUM_VLANS_SUPPORTED; - else + adapter->mc_type = be_convert_mc_type(adapter->function_mode); + + if (be_is_mc(adapter)) { + /* Assuming that there are 4 channels per port, + * when multi-channel is enabled + */ + if (be_is_qnq_mode(adapter)) + res->max_vlans = BE_NUM_VLANS_SUPPORTED/8; + else + /* In a non-qnq multichannel mode, the pvid + * takes up one vlan entry + */ + res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1; + } else { res->max_vlans = BE_NUM_VLANS_SUPPORTED; + } + res->max_mcast_mac = BE_MAX_MC; - /* For BE3 1Gb ports, F/W does not properly support multiple TXQs */ - if (BE2_chip(adapter) || use_sriov || be_is_mc(adapter) || - !be_physfn(adapter) || (adapter->port_num > 1)) + /* 1) For BE3 1Gb ports, FW does not support multiple TXQs + * 2) Create multiple TX rings on a BE3-R multi-channel interface + * *only* if it is RSS-capable. + */ + if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) || + !be_physfn(adapter) || (be_is_mc(adapter) && + !(adapter->function_caps & BE_FUNCTION_CAPS_RSS))) res->max_tx_qs = 1; else res->max_tx_qs = BE3_MAX_TX_QS; @@ -3161,7 +3277,7 @@ static void BEx_get_resources(struct be_adapter *adapter, res->max_rx_qs = res->max_rss_qs + 1; if (be_physfn(adapter)) - res->max_evt_qs = (max_vfs > 0) ? + res->max_evt_qs = (res->max_vfs > 0) ? BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS; else res->max_evt_qs = 1; @@ -3252,9 +3368,8 @@ static int be_get_config(struct be_adapter *adapter) if (status) return status; - /* primary mac needs 1 pmac entry */ - adapter->pmac_id = kcalloc(be_max_uc(adapter) + 1, sizeof(u32), - GFP_KERNEL); + adapter->pmac_id = kcalloc(be_max_uc(adapter), + sizeof(*adapter->pmac_id), GFP_KERNEL); if (!adapter->pmac_id) return -ENOMEM; @@ -3428,6 +3543,10 @@ static int be_setup(struct be_adapter *adapter) be_cmd_set_flow_control(adapter, adapter->tx_fc, adapter->rx_fc); + if (be_physfn(adapter)) + be_cmd_set_logical_link_config(adapter, + IFLA_VF_LINK_STATE_AUTO, 0); + if (sriov_want(adapter)) { if (be_max_vfs(adapter)) be_vf_setup(adapter); @@ -4052,6 +4171,67 @@ static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB); } +#ifdef CONFIG_BE2NET_VXLAN +static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family, + __be16 port) +{ + struct be_adapter *adapter = netdev_priv(netdev); + struct device *dev = &adapter->pdev->dev; + int status; + + if (lancer_chip(adapter) || BEx_chip(adapter)) + return; + + if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) { + dev_warn(dev, "Cannot add UDP port %d for VxLAN offloads\n", + be16_to_cpu(port)); + dev_info(dev, + "Only one UDP port supported for VxLAN offloads\n"); + return; + } + + status = be_cmd_manage_iface(adapter, adapter->if_handle, + OP_CONVERT_NORMAL_TO_TUNNEL); + if (status) { + dev_warn(dev, "Failed to convert normal interface to tunnel\n"); + goto err; + } + + status = be_cmd_set_vxlan_port(adapter, port); + if (status) { + dev_warn(dev, "Failed to add VxLAN port\n"); + goto err; + } + adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS; + adapter->vxlan_port = port; + + dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n", + 
be16_to_cpu(port)); + return; +err: + be_disable_vxlan_offloads(adapter); + return; +} + +static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family, + __be16 port) +{ + struct be_adapter *adapter = netdev_priv(netdev); + + if (lancer_chip(adapter) || BEx_chip(adapter)) + return; + + if (adapter->vxlan_port != port) + return; + + be_disable_vxlan_offloads(adapter); + + dev_info(&adapter->pdev->dev, + "Disabled VxLAN offloads for UDP port %d\n", + be16_to_cpu(port)); +} +#endif + static const struct net_device_ops be_netdev_ops = { .ndo_open = be_open, .ndo_stop = be_close, @@ -4067,13 +4247,18 @@ static const struct net_device_ops be_netdev_ops = { .ndo_set_vf_vlan = be_set_vf_vlan, .ndo_set_vf_tx_rate = be_set_vf_tx_rate, .ndo_get_vf_config = be_get_vf_config, + .ndo_set_vf_link_state = be_set_vf_link_state, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = be_netpoll, #endif .ndo_bridge_setlink = be_ndo_bridge_setlink, .ndo_bridge_getlink = be_ndo_bridge_getlink, #ifdef CONFIG_NET_RX_BUSY_POLL - .ndo_busy_poll = be_busy_poll + .ndo_busy_poll = be_busy_poll, +#endif +#ifdef CONFIG_BE2NET_VXLAN + .ndo_add_vxlan_port = be_add_vxlan_port, + .ndo_del_vxlan_port = be_del_vxlan_port, #endif }; @@ -4081,6 +4266,12 @@ static void be_netdev_init(struct net_device *netdev) { struct be_adapter *adapter = netdev_priv(netdev); + if (skyhawk_chip(adapter)) { + netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | + NETIF_F_TSO | NETIF_F_TSO6 | + NETIF_F_GSO_UDP_TUNNEL; + netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL; + } netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_TX; @@ -4427,14 +4618,32 @@ static bool be_reset_required(struct be_adapter *adapter) static char *mc_name(struct be_adapter *adapter) { - if (adapter->function_mode & FLEX10_MODE) - return "FLEX10"; - else if (adapter->function_mode & VNIC_MODE) - return "vNIC"; - else if (adapter->function_mode & UMC_ENABLED) - return "UMC"; - else - return ""; + char *str = ""; /* default */ + + switch (adapter->mc_type) { + case UMC: + str = "UMC"; + break; + case FLEX10: + str = "FLEX10"; + break; + case vNIC1: + str = "vNIC-1"; + break; + case nPAR: + str = "nPAR"; + break; + case UFP: + str = "UFP"; + break; + case vNIC2: + str = "vNIC-2"; + break; + default: + str = ""; + } + + return str; } static inline char *func_name(struct be_adapter *adapter) diff --git a/drivers/net/ethernet/emulex/benet/be_roce.c b/drivers/net/ethernet/emulex/benet/be_roce.c index 9cd5415fe017..5bf16603a3e9 100644 --- a/drivers/net/ethernet/emulex/benet/be_roce.c +++ b/drivers/net/ethernet/emulex/benet/be_roce.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2005 - 2013 Emulex + * Copyright (C) 2005 - 2014 Emulex * All rights reserved. 
* * This program is free software; you can redistribute it and/or @@ -35,6 +35,12 @@ static void _be_roce_dev_add(struct be_adapter *adapter) if (!ocrdma_drv) return; + + if (ocrdma_drv->be_abi_version != BE_ROCE_ABI_VERSION) { + dev_warn(&pdev->dev, "Cannot initialize RoCE due to ocrdma ABI mismatch\n"); + return; + } + if (pdev->device == OC_DEVICE_ID5) { /* only msix is supported on these devices */ if (!msix_enabled(adapter)) diff --git a/drivers/net/ethernet/emulex/benet/be_roce.h b/drivers/net/ethernet/emulex/benet/be_roce.h index 2cd1129e19af..a3d9e96c18eb 100644 --- a/drivers/net/ethernet/emulex/benet/be_roce.h +++ b/drivers/net/ethernet/emulex/benet/be_roce.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2005 - 2013 Emulex + * Copyright (C) 2005 - 2014 Emulex * All rights reserved. * * This program is free software; you can redistribute it and/or @@ -21,6 +21,8 @@ #include <linux/pci.h> #include <linux/netdevice.h> +#define BE_ROCE_ABI_VERSION 1 + struct ocrdma_dev; enum be_interrupt_mode { @@ -52,6 +54,7 @@ struct be_dev_info { /* ocrdma driver register's the callback functions with nic driver. */ struct ocrdma_driver { unsigned char name[32]; + u32 be_abi_version; struct ocrdma_dev *(*add) (struct be_dev_info *dev_info); void (*remove) (struct ocrdma_dev *); void (*state_change_handler) (struct ocrdma_dev *, u32 new_state); diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c index 55e0fa03dc90..8b70ca7e342b 100644 --- a/drivers/net/ethernet/ethoc.c +++ b/drivers/net/ethernet/ethoc.c @@ -660,11 +660,6 @@ static int ethoc_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val) return -EBUSY; } -static int ethoc_mdio_reset(struct mii_bus *bus) -{ - return 0; -} - static void ethoc_mdio_poll(struct net_device *dev) { } @@ -1210,7 +1205,6 @@ static int ethoc_probe(struct platform_device *pdev) priv->mdio->name, pdev->id); priv->mdio->read = ethoc_mdio_read; priv->mdio->write = ethoc_mdio_write; - priv->mdio->reset = ethoc_mdio_reset; priv->mdio->priv = priv; priv->mdio->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c index c11ecbc98149..68069eabc4f8 100644 --- a/drivers/net/ethernet/faraday/ftgmac100.c +++ b/drivers/net/ethernet/faraday/ftgmac100.c @@ -940,11 +940,6 @@ static int ftgmac100_mdiobus_write(struct mii_bus *bus, int phy_addr, return -EIO; } -static int ftgmac100_mdiobus_reset(struct mii_bus *bus) -{ - return 0; -} - /****************************************************************************** * struct ethtool_ops functions *****************************************************************************/ @@ -1262,7 +1257,6 @@ static int ftgmac100_probe(struct platform_device *pdev) priv->mii_bus->priv = netdev; priv->mii_bus->read = ftgmac100_mdiobus_read; priv->mii_bus->write = ftgmac100_mdiobus_write; - priv->mii_bus->reset = ftgmac100_mdiobus_reset; priv->mii_bus->irq = priv->phy_irq; for (i = 0; i < PHY_MAX_ADDR; i++) diff --git a/drivers/net/ethernet/freescale/Makefile b/drivers/net/ethernet/freescale/Makefile index 549ce13b92ac..71debd1c18c9 100644 --- a/drivers/net/ethernet/freescale/Makefile +++ b/drivers/net/ethernet/freescale/Makefile @@ -14,7 +14,6 @@ obj-$(CONFIG_FSL_XGMAC_MDIO) += xgmac_mdio.o obj-$(CONFIG_GIANFAR) += gianfar_driver.o obj-$(CONFIG_PTP_1588_CLOCK_GIANFAR) += gianfar_ptp.o gianfar_driver-objs := gianfar.o \ - gianfar_ethtool.o \ - gianfar_sysfs.o + gianfar_ethtool.o obj-$(CONFIG_UCC_GETH) += ucc_geth_driver.o ucc_geth_driver-objs 
:= ucc_geth.o ucc_geth_ethtool.o diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 479a7cba45c0..8d69e439f0c5 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -338,7 +338,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev) /* Protocol checksum off-load for TCP and UDP. */ if (fec_enet_clear_csum(skb, ndev)) { - kfree_skb(skb); + dev_kfree_skb_any(skb); return NETDEV_TX_OK; } @@ -528,13 +528,6 @@ fec_restart(struct net_device *ndev, int duplex) /* Clear any outstanding interrupt. */ writel(0xffc00000, fep->hwp + FEC_IEVENT); - /* Setup multicast filter. */ - set_multicast_list(ndev); -#ifndef CONFIG_M5272 - writel(0, fep->hwp + FEC_HASH_TABLE_HIGH); - writel(0, fep->hwp + FEC_HASH_TABLE_LOW); -#endif - /* Set maximum receive buffer size. */ writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE); @@ -655,6 +648,13 @@ fec_restart(struct net_device *ndev, int duplex) writel(rcntl, fep->hwp + FEC_R_CNTRL); + /* Setup multicast filter. */ + set_multicast_list(ndev); +#ifndef CONFIG_M5272 + writel(0, fep->hwp + FEC_HASH_TABLE_HIGH); + writel(0, fep->hwp + FEC_HASH_TABLE_LOW); +#endif + if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) { /* enable ENET endian swap */ ecntl |= (1 << 8); @@ -1255,11 +1255,6 @@ static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum, return 0; } -static int fec_enet_mdio_reset(struct mii_bus *bus) -{ - return 0; -} - static int fec_enet_mii_probe(struct net_device *ndev) { struct fec_enet_private *fep = netdev_priv(ndev); @@ -1384,7 +1379,6 @@ static int fec_enet_mii_init(struct platform_device *pdev) fep->mii_bus->name = "fec_enet_mii_bus"; fep->mii_bus->read = fec_enet_mdio_read; fep->mii_bus->write = fec_enet_mdio_write; - fep->mii_bus->reset = fec_enet_mdio_reset; snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", pdev->name, fep->dev_id + 1); fep->mii_bus->priv = fep; @@ -1904,10 +1898,11 @@ fec_set_mac_address(struct net_device *ndev, void *p) struct fec_enet_private *fep = netdev_priv(ndev); struct sockaddr *addr = p; - if (!is_valid_ether_addr(addr->sa_data)) - return -EADDRNOTAVAIL; - - memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len); + if (addr) { + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len); + } writel(ndev->dev_addr[3] | (ndev->dev_addr[2] << 8) | (ndev->dev_addr[1] << 16) | (ndev->dev_addr[0] << 24), @@ -2006,6 +2001,8 @@ static int fec_enet_init(struct net_device *ndev) /* Get the Ethernet address */ fec_get_mac(ndev); + /* make sure MAC we just acquired is programmed into the hw */ + fec_set_mac_address(ndev, NULL); /* init the tx & rx ring size */ fep->tx_ring_size = TX_RING_SIZE; diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c index 89ccb5b08708..82386b29914a 100644 --- a/drivers/net/ethernet/freescale/fec_ptp.c +++ b/drivers/net/ethernet/freescale/fec_ptp.c @@ -372,6 +372,7 @@ void fec_ptp_init(struct platform_device *pdev) fep->ptp_caps.n_alarm = 0; fep->ptp_caps.n_ext_ts = 0; fep->ptp_caps.n_per_out = 0; + fep->ptp_caps.n_pins = 0; fep->ptp_caps.pps = 0; fep->ptp_caps.adjfreq = fec_ptp_adjfreq; fep->ptp_caps.adjtime = fec_ptp_adjtime; diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c index 62f042d4aaa9..dc80db41d6b3 100644 --- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c +++ 
b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c @@ -91,6 +91,9 @@ static int fs_enet_rx_napi(struct napi_struct *napi, int budget) u16 pkt_len, sc; int curidx; + if (budget <= 0) + return received; + /* * First, grab all of the stats for the incoming packet. * These get messed up if we get called due to a busy condition. diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c index 7e69c983d12a..ebf5d6429a8d 100644 --- a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c +++ b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c @@ -95,12 +95,6 @@ static int fs_enet_fec_mii_write(struct mii_bus *bus, int phy_id, int location, } -static int fs_enet_fec_mii_reset(struct mii_bus *bus) -{ - /* nothing here - for now */ - return 0; -} - static struct of_device_id fs_enet_mdio_fec_match[]; static int fs_enet_mdio_probe(struct platform_device *ofdev) { @@ -128,7 +122,6 @@ static int fs_enet_mdio_probe(struct platform_device *ofdev) new_bus->name = "FEC MII Bus"; new_bus->read = &fs_enet_fec_mii_read; new_bus->write = &fs_enet_fec_mii_write; - new_bus->reset = &fs_enet_fec_mii_reset; ret = of_address_to_resource(ofdev->dev.of_node, 0, &res); if (ret) diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index ad5a5aadc7e1..9125d9abf099 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -9,7 +9,7 @@ * Maintainer: Kumar Gala * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com> * - * Copyright 2002-2009, 2011 Freescale Semiconductor, Inc. + * Copyright 2002-2009, 2011-2013 Freescale Semiconductor, Inc. * Copyright 2007 MontaVista Software, Inc. * * This program is free software; you can redistribute it and/or modify it @@ -121,7 +121,6 @@ static irqreturn_t gfar_error(int irq, void *dev_id); static irqreturn_t gfar_transmit(int irq, void *dev_id); static irqreturn_t gfar_interrupt(int irq, void *dev_id); static void adjust_link(struct net_device *dev); -static void init_registers(struct net_device *dev); static int init_phy(struct net_device *dev); static int gfar_probe(struct platform_device *ofdev); static int gfar_remove(struct platform_device *ofdev); @@ -129,8 +128,10 @@ static void free_skb_resources(struct gfar_private *priv); static void gfar_set_multi(struct net_device *dev); static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr); static void gfar_configure_serdes(struct net_device *dev); -static int gfar_poll(struct napi_struct *napi, int budget); -static int gfar_poll_sq(struct napi_struct *napi, int budget); +static int gfar_poll_rx(struct napi_struct *napi, int budget); +static int gfar_poll_tx(struct napi_struct *napi, int budget); +static int gfar_poll_rx_sq(struct napi_struct *napi, int budget); +static int gfar_poll_tx_sq(struct napi_struct *napi, int budget); #ifdef CONFIG_NET_POLL_CONTROLLER static void gfar_netpoll(struct net_device *dev); #endif @@ -138,9 +139,7 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit); static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue); static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb, int amount_pull, struct napi_struct *napi); -void gfar_halt(struct net_device *dev); -static void gfar_halt_nodisable(struct net_device *dev); -void gfar_start(struct net_device *dev); +static void gfar_halt_nodisable(struct gfar_private *priv); static void gfar_clear_exact_match(struct net_device *dev); static void 
gfar_set_mac_for_addr(struct net_device *dev, int num, const u8 *addr); @@ -332,72 +331,76 @@ static void gfar_init_tx_rx_base(struct gfar_private *priv) } } -static void gfar_init_mac(struct net_device *ndev) +static void gfar_rx_buff_size_config(struct gfar_private *priv) { - struct gfar_private *priv = netdev_priv(ndev); - struct gfar __iomem *regs = priv->gfargrp[0].regs; - u32 rctrl = 0; - u32 tctrl = 0; - u32 attrs = 0; - - /* write the tx/rx base registers */ - gfar_init_tx_rx_base(priv); - - /* Configure the coalescing support */ - gfar_configure_coalescing_all(priv); + int frame_size = priv->ndev->mtu + ETH_HLEN; /* set this when rx hw offload (TOE) functions are being used */ priv->uses_rxfcb = 0; + if (priv->ndev->features & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX)) + priv->uses_rxfcb = 1; + + if (priv->hwts_rx_en) + priv->uses_rxfcb = 1; + + if (priv->uses_rxfcb) + frame_size += GMAC_FCB_LEN; + + frame_size += priv->padding; + + frame_size = (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) + + INCREMENTAL_BUFFER_SIZE; + + priv->rx_buffer_size = frame_size; +} + +static void gfar_mac_rx_config(struct gfar_private *priv) +{ + struct gfar __iomem *regs = priv->gfargrp[0].regs; + u32 rctrl = 0; + if (priv->rx_filer_enable) { rctrl |= RCTRL_FILREN; /* Program the RIR0 reg with the required distribution */ - gfar_write(&regs->rir0, DEFAULT_RIR0); + if (priv->poll_mode == GFAR_SQ_POLLING) + gfar_write(&regs->rir0, DEFAULT_2RXQ_RIR0); + else /* GFAR_MQ_POLLING */ + gfar_write(&regs->rir0, DEFAULT_8RXQ_RIR0); } /* Restore PROMISC mode */ - if (ndev->flags & IFF_PROMISC) + if (priv->ndev->flags & IFF_PROMISC) rctrl |= RCTRL_PROM; - if (ndev->features & NETIF_F_RXCSUM) { + if (priv->ndev->features & NETIF_F_RXCSUM) rctrl |= RCTRL_CHECKSUMMING; - priv->uses_rxfcb = 1; - } - if (priv->extended_hash) { - rctrl |= RCTRL_EXTHASH; - - gfar_clear_exact_match(ndev); - rctrl |= RCTRL_EMEN; - } + if (priv->extended_hash) + rctrl |= RCTRL_EXTHASH | RCTRL_EMEN; if (priv->padding) { rctrl &= ~RCTRL_PAL_MASK; rctrl |= RCTRL_PADDING(priv->padding); } - /* Insert receive time stamps into padding alignment bytes */ - if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) { - rctrl &= ~RCTRL_PAL_MASK; - rctrl |= RCTRL_PADDING(8); - priv->padding = 8; - } - /* Enable HW time stamping if requested from user space */ - if (priv->hwts_rx_en) { + if (priv->hwts_rx_en) rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE; - priv->uses_rxfcb = 1; - } - if (ndev->features & NETIF_F_HW_VLAN_CTAG_RX) { + if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_RX) rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT; - priv->uses_rxfcb = 1; - } /* Init rctrl based on our settings */ gfar_write(&regs->rctrl, rctrl); +} + +static void gfar_mac_tx_config(struct gfar_private *priv) +{ + struct gfar __iomem *regs = priv->gfargrp[0].regs; + u32 tctrl = 0; - if (ndev->features & NETIF_F_IP_CSUM) + if (priv->ndev->features & NETIF_F_IP_CSUM) tctrl |= TCTRL_INIT_CSUM; if (priv->prio_sched_en) @@ -408,30 +411,51 @@ static void gfar_init_mac(struct net_device *ndev) gfar_write(&regs->tr47wt, DEFAULT_WRRS_WEIGHT); } - gfar_write(&regs->tctrl, tctrl); + if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_TX) + tctrl |= TCTRL_VLINS; - /* Set the extraction length and index */ - attrs = ATTRELI_EL(priv->rx_stash_size) | - ATTRELI_EI(priv->rx_stash_index); + gfar_write(&regs->tctrl, tctrl); +}
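
The new gfar_rx_buff_size_config() above sizes the RX buffer with a power-of-two round-up idiom: masking with ~(INCREMENTAL_BUFFER_SIZE - 1) truncates frame_size down to a multiple of the increment, and adding one full increment then leaves headroom above the MTU-derived length (an already-aligned size still grows by one step). A minimal userspace sketch of the same arithmetic; the helper name is invented here, and the 512-byte increment is an assumption taken from gianfar.h:

#include <assert.h>

#define INCREMENTAL_BUFFER_SIZE 512	/* power of two, per gianfar.h */

/* Round frame_size down to a multiple of the increment, then add one
 * full increment, mirroring gfar_rx_buff_size_config() above.
 */
static int rx_buff_size_round(int frame_size)
{
	return (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
	       INCREMENTAL_BUFFER_SIZE;
}

int main(void)
{
	assert(rx_buff_size_round(1518) == 1536);	/* 1024 + 512 */
	assert(rx_buff_size_round(1536) == 2048);	/* aligned input still rounds up */
	return 0;
}
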
- gfar_write(&regs->attreli, attrs); +static void gfar_configure_coalescing(struct gfar_private *priv, + unsigned long tx_mask, unsigned long rx_mask) +{ + struct gfar __iomem *regs = priv->gfargrp[0].regs; + u32 __iomem *baddr; - /* Start with defaults, and add stashing or locking - * depending on the approprate variables - */ - attrs = ATTR_INIT_SETTINGS; + if (priv->mode == MQ_MG_MODE) { + int i = 0; - if (priv->bd_stash_en) - attrs |= ATTR_BDSTASH; + baddr = &regs->txic0; + for_each_set_bit(i, &tx_mask, priv->num_tx_queues) { + gfar_write(baddr + i, 0); + if (likely(priv->tx_queue[i]->txcoalescing)) + gfar_write(baddr + i, priv->tx_queue[i]->txic); + } - if (priv->rx_stash_size != 0) - attrs |= ATTR_BUFSTASH; + baddr = &regs->rxic0; + for_each_set_bit(i, &rx_mask, priv->num_rx_queues) { + gfar_write(baddr + i, 0); + if (likely(priv->rx_queue[i]->rxcoalescing)) + gfar_write(baddr + i, priv->rx_queue[i]->rxic); + } + } else { + /* Backward compatible case -- even if we enable + * multiple queues, there's only single reg to program + */ + gfar_write(&regs->txic, 0); + if (likely(priv->tx_queue[0]->txcoalescing)) + gfar_write(&regs->txic, priv->tx_queue[0]->txic); - gfar_write(&regs->attr, attrs); + gfar_write(&regs->rxic, 0); + if (unlikely(priv->rx_queue[0]->rxcoalescing)) + gfar_write(&regs->rxic, priv->rx_queue[0]->rxic); + } +} - gfar_write(&regs->fifo_tx_thr, priv->fifo_threshold); - gfar_write(&regs->fifo_tx_starve, priv->fifo_starve); - gfar_write(&regs->fifo_tx_starve_shutoff, priv->fifo_starve_off); +void gfar_configure_coalescing_all(struct gfar_private *priv) +{ + gfar_configure_coalescing(priv, 0xFF, 0xFF); } static struct net_device_stats *gfar_get_stats(struct net_device *dev) @@ -479,12 +503,27 @@ static const struct net_device_ops gfar_netdev_ops = { #endif }; -void lock_rx_qs(struct gfar_private *priv) +static void gfar_ints_disable(struct gfar_private *priv) { int i; + for (i = 0; i < priv->num_grps; i++) { + struct gfar __iomem *regs = priv->gfargrp[i].regs; + /* Clear IEVENT */ + gfar_write(&regs->ievent, IEVENT_INIT_CLEAR); - for (i = 0; i < priv->num_rx_queues; i++) - spin_lock(&priv->rx_queue[i]->rxlock); + /* Initialize IMASK */ + gfar_write(&regs->imask, IMASK_INIT_CLEAR); + } +} + +static void gfar_ints_enable(struct gfar_private *priv) +{ + int i; + for (i = 0; i < priv->num_grps; i++) { + struct gfar __iomem *regs = priv->gfargrp[i].regs; + /* Unmask the interrupts we look for */ + gfar_write(&regs->imask, IMASK_DEFAULT); + } } void lock_tx_qs(struct gfar_private *priv) @@ -495,23 +534,50 @@ void lock_tx_qs(struct gfar_private *priv) spin_lock(&priv->tx_queue[i]->txlock); } -void unlock_rx_qs(struct gfar_private *priv) +void unlock_tx_qs(struct gfar_private *priv) { int i; - for (i = 0; i < priv->num_rx_queues; i++) - spin_unlock(&priv->rx_queue[i]->rxlock); + for (i = 0; i < priv->num_tx_queues; i++) + spin_unlock(&priv->tx_queue[i]->txlock); } -void unlock_tx_qs(struct gfar_private *priv) +static int gfar_alloc_tx_queues(struct gfar_private *priv) { int i; - for (i = 0; i < priv->num_tx_queues; i++) - spin_unlock(&priv->tx_queue[i]->txlock); + for (i = 0; i < priv->num_tx_queues; i++) { + priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q), + GFP_KERNEL); + if (!priv->tx_queue[i]) + return -ENOMEM; + + priv->tx_queue[i]->tx_skbuff = NULL; + priv->tx_queue[i]->qindex = i; + priv->tx_queue[i]->dev = priv->ndev; + spin_lock_init(&(priv->tx_queue[i]->txlock)); + } + return 0; } -static void free_tx_pointers(struct gfar_private *priv) +static int gfar_alloc_rx_queues(struct gfar_private *priv) +{ + int i; + + for (i = 0; i < priv->num_rx_queues; i++) { + priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q), + GFP_KERNEL); + if (!priv->rx_queue[i]) + return -ENOMEM; + +
priv->rx_queue[i]->rx_skbuff = NULL; + priv->rx_queue[i]->qindex = i; + priv->rx_queue[i]->dev = priv->ndev; + } + return 0; +} + +static void gfar_free_tx_queues(struct gfar_private *priv) { int i; @@ -519,7 +585,7 @@ static void free_tx_pointers(struct gfar_private *priv) kfree(priv->tx_queue[i]); } -static void free_rx_pointers(struct gfar_private *priv) +static void gfar_free_rx_queues(struct gfar_private *priv) { int i; @@ -553,23 +619,26 @@ static void disable_napi(struct gfar_private *priv) { int i; - for (i = 0; i < priv->num_grps; i++) - napi_disable(&priv->gfargrp[i].napi); + for (i = 0; i < priv->num_grps; i++) { + napi_disable(&priv->gfargrp[i].napi_rx); + napi_disable(&priv->gfargrp[i].napi_tx); + } } static void enable_napi(struct gfar_private *priv) { int i; - for (i = 0; i < priv->num_grps; i++) - napi_enable(&priv->gfargrp[i].napi); + for (i = 0; i < priv->num_grps; i++) { + napi_enable(&priv->gfargrp[i].napi_rx); + napi_enable(&priv->gfargrp[i].napi_tx); + } } static int gfar_parse_group(struct device_node *np, struct gfar_private *priv, const char *model) { struct gfar_priv_grp *grp = &priv->gfargrp[priv->num_grps]; - u32 *queue_mask; int i; for (i = 0; i < GFAR_NUM_IRQS; i++) { @@ -598,16 +667,52 @@ static int gfar_parse_group(struct device_node *np, grp->priv = priv; spin_lock_init(&grp->grplock); if (priv->mode == MQ_MG_MODE) { - queue_mask = (u32 *)of_get_property(np, "fsl,rx-bit-map", NULL); - grp->rx_bit_map = queue_mask ? - *queue_mask : (DEFAULT_MAPPING >> priv->num_grps); - queue_mask = (u32 *)of_get_property(np, "fsl,tx-bit-map", NULL); - grp->tx_bit_map = queue_mask ? - *queue_mask : (DEFAULT_MAPPING >> priv->num_grps); + u32 *rxq_mask, *txq_mask; + rxq_mask = (u32 *)of_get_property(np, "fsl,rx-bit-map", NULL); + txq_mask = (u32 *)of_get_property(np, "fsl,tx-bit-map", NULL); + + if (priv->poll_mode == GFAR_SQ_POLLING) { + /* One Q per interrupt group: Q0 to G0, Q1 to G1 */ + grp->rx_bit_map = (DEFAULT_MAPPING >> priv->num_grps); + grp->tx_bit_map = (DEFAULT_MAPPING >> priv->num_grps); + } else { /* GFAR_MQ_POLLING */ + grp->rx_bit_map = rxq_mask ? + *rxq_mask : (DEFAULT_MAPPING >> priv->num_grps); + grp->tx_bit_map = txq_mask ? 
+ *txq_mask : (DEFAULT_MAPPING >> priv->num_grps); + } } else { grp->rx_bit_map = 0xFF; grp->tx_bit_map = 0xFF; } + + /* bit_map's MSB is q0 (from q0 to q7) but, for_each_set_bit parses + * right to left, so we need to revert the 8 bits to get the q index + */ + grp->rx_bit_map = bitrev8(grp->rx_bit_map); + grp->tx_bit_map = bitrev8(grp->tx_bit_map); + + /* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values, + * also assign queues to groups + */ + for_each_set_bit(i, &grp->rx_bit_map, priv->num_rx_queues) { + if (!grp->rx_queue) + grp->rx_queue = priv->rx_queue[i]; + grp->num_rx_queues++; + grp->rstat |= (RSTAT_CLEAR_RHALT >> i); + priv->rqueue |= ((RQUEUE_EN0 | RQUEUE_EX0) >> i); + priv->rx_queue[i]->grp = grp; + } + + for_each_set_bit(i, &grp->tx_bit_map, priv->num_tx_queues) { + if (!grp->tx_queue) + grp->tx_queue = priv->tx_queue[i]; + grp->num_tx_queues++; + grp->tstat |= (TSTAT_CLEAR_THALT >> i); + priv->tqueue |= (TQUEUE_EN0 >> i); + priv->tx_queue[i]->grp = grp; + } + priv->num_grps++; return 0; @@ -628,13 +733,45 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) const u32 *stash_idx; unsigned int num_tx_qs, num_rx_qs; u32 *tx_queues, *rx_queues; + unsigned short mode, poll_mode; if (!np || !of_device_is_available(np)) return -ENODEV; - /* parse the num of tx and rx queues */ + if (of_device_is_compatible(np, "fsl,etsec2")) { + mode = MQ_MG_MODE; + poll_mode = GFAR_SQ_POLLING; + } else { + mode = SQ_SG_MODE; + poll_mode = GFAR_SQ_POLLING; + } + + /* parse the num of HW tx and rx queues */ tx_queues = (u32 *)of_get_property(np, "fsl,num_tx_queues", NULL); - num_tx_qs = tx_queues ? *tx_queues : 1; + rx_queues = (u32 *)of_get_property(np, "fsl,num_rx_queues", NULL); + + if (mode == SQ_SG_MODE) { + num_tx_qs = 1; + num_rx_qs = 1; + } else { /* MQ_MG_MODE */ + /* get the actual number of supported groups */ + unsigned int num_grps = of_get_available_child_count(np); + + if (num_grps == 0 || num_grps > MAXGROUPS) { + dev_err(&ofdev->dev, "Invalid # of int groups(%d)\n", + num_grps); + pr_err("Cannot do alloc_etherdev, aborting\n"); + return -EINVAL; + } + + if (poll_mode == GFAR_SQ_POLLING) { + num_tx_qs = num_grps; /* one txq per int group */ + num_rx_qs = num_grps; /* one rxq per int group */ + } else { /* GFAR_MQ_POLLING */ + num_tx_qs = tx_queues ? *tx_queues : 1; + num_rx_qs = rx_queues ? *rx_queues : 1; + } + } if (num_tx_qs > MAX_TX_QS) { pr_err("num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n", @@ -643,9 +780,6 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) return -EINVAL; } - rx_queues = (u32 *)of_get_property(np, "fsl,num_rx_queues", NULL); - num_rx_qs = rx_queues ? 
*rx_queues : 1; - if (num_rx_qs > MAX_RX_QS) { pr_err("num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n", num_rx_qs, MAX_RX_QS); @@ -661,10 +795,20 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) priv = netdev_priv(dev); priv->ndev = dev; + priv->mode = mode; + priv->poll_mode = poll_mode; + priv->num_tx_queues = num_tx_qs; netif_set_real_num_rx_queues(dev, num_rx_qs); priv->num_rx_queues = num_rx_qs; - priv->num_grps = 0x0; + + err = gfar_alloc_tx_queues(priv); + if (err) + goto tx_alloc_failed; + + err = gfar_alloc_rx_queues(priv); + if (err) + goto rx_alloc_failed; /* Init Rx queue filer rule set linked list */ INIT_LIST_HEAD(&priv->rx_list.list); @@ -677,52 +821,18 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) priv->gfargrp[i].regs = NULL; /* Parse and initialize group specific information */ - if (of_device_is_compatible(np, "fsl,etsec2")) { - priv->mode = MQ_MG_MODE; + if (priv->mode == MQ_MG_MODE) { for_each_child_of_node(np, child) { err = gfar_parse_group(child, priv, model); if (err) goto err_grp_init; } - } else { - priv->mode = SQ_SG_MODE; + } else { /* SQ_SG_MODE */ err = gfar_parse_group(np, priv, model); if (err) goto err_grp_init; } - for (i = 0; i < priv->num_tx_queues; i++) - priv->tx_queue[i] = NULL; - for (i = 0; i < priv->num_rx_queues; i++) - priv->rx_queue[i] = NULL; - - for (i = 0; i < priv->num_tx_queues; i++) { - priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q), - GFP_KERNEL); - if (!priv->tx_queue[i]) { - err = -ENOMEM; - goto tx_alloc_failed; - } - priv->tx_queue[i]->tx_skbuff = NULL; - priv->tx_queue[i]->qindex = i; - priv->tx_queue[i]->dev = dev; - spin_lock_init(&(priv->tx_queue[i]->txlock)); - } - - for (i = 0; i < priv->num_rx_queues; i++) { - priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q), - GFP_KERNEL); - if (!priv->rx_queue[i]) { - err = -ENOMEM; - goto rx_alloc_failed; - } - priv->rx_queue[i]->rx_skbuff = NULL; - priv->rx_queue[i]->qindex = i; - priv->rx_queue[i]->dev = dev; - spin_lock_init(&(priv->rx_queue[i]->rxlock)); - } - - stash = of_get_property(np, "bd-stash", NULL); if (stash) { @@ -749,17 +859,16 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) memcpy(dev->dev_addr, mac_addr, ETH_ALEN); if (model && !strcasecmp(model, "TSEC")) - priv->device_flags = FSL_GIANFAR_DEV_HAS_GIGABIT | + priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT | FSL_GIANFAR_DEV_HAS_COALESCE | FSL_GIANFAR_DEV_HAS_RMON | FSL_GIANFAR_DEV_HAS_MULTI_INTR; if (model && !strcasecmp(model, "eTSEC")) - priv->device_flags = FSL_GIANFAR_DEV_HAS_GIGABIT | + priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT | FSL_GIANFAR_DEV_HAS_COALESCE | FSL_GIANFAR_DEV_HAS_RMON | FSL_GIANFAR_DEV_HAS_MULTI_INTR | - FSL_GIANFAR_DEV_HAS_PADDING | FSL_GIANFAR_DEV_HAS_CSUM | FSL_GIANFAR_DEV_HAS_VLAN | FSL_GIANFAR_DEV_HAS_MAGIC_PACKET | @@ -784,12 +893,12 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) return 0; -rx_alloc_failed: - free_rx_pointers(priv); -tx_alloc_failed: - free_tx_pointers(priv); err_grp_init: unmap_group_regs(priv); +rx_alloc_failed: + gfar_free_rx_queues(priv); +tx_alloc_failed: + gfar_free_tx_queues(priv); free_gfar_dev(priv); return err; } @@ -822,18 +931,16 @@ static int gfar_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr) switch (config.rx_filter) { case HWTSTAMP_FILTER_NONE: if (priv->hwts_rx_en) { - stop_gfar(netdev); priv->hwts_rx_en = 0; - startup_gfar(netdev); + reset_gfar(netdev); } break; default: if 
(!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)) return -ERANGE; if (!priv->hwts_rx_en) { - stop_gfar(netdev); priv->hwts_rx_en = 1; - startup_gfar(netdev); + reset_gfar(netdev); } config.rx_filter = HWTSTAMP_FILTER_ALL; break; @@ -875,19 +982,6 @@ static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) return phy_mii_ioctl(priv->phydev, rq, cmd); } -static unsigned int reverse_bitmap(unsigned int bit_map, unsigned int max_qs) -{ - unsigned int new_bit_map = 0x0; - int mask = 0x1 << (max_qs - 1), i; - - for (i = 0; i < max_qs; i++) { - if (bit_map & mask) - new_bit_map = new_bit_map + (1 << i); - mask = mask >> 0x1; - } - return new_bit_map; -} -
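
The reverse_bitmap() helper deleted above is superseded by the kernel's bitrev8() from <linux/bitrev.h> (see the bitrev8(grp->rx_bit_map) calls in gfar_parse_group() earlier), which works because the per-group queue bit maps are exactly one byte wide. A self-contained sketch of the equivalence; my_bitrev8() is a local stand-in producing the same result as the kernel routine:

#include <assert.h>

/* The deleted helper, specialized to max_qs = 8 */
static unsigned int reverse_bitmap8(unsigned int bit_map)
{
	unsigned int new_bit_map = 0, mask = 1u << 7, i;

	for (i = 0; i < 8; i++) {
		if (bit_map & mask)
			new_bit_map |= 1u << i;
		mask >>= 1;
	}
	return new_bit_map;
}

/* Byte reversal by halving swaps; same result as the kernel's bitrev8() */
static unsigned char my_bitrev8(unsigned char b)
{
	b = (b & 0xF0) >> 4 | (b & 0x0F) << 4;
	b = (b & 0xCC) >> 2 | (b & 0x33) << 2;
	b = (b & 0xAA) >> 1 | (b & 0x55) << 1;
	return b;
}

int main(void)
{
	unsigned int v;

	for (v = 0; v < 256; v++)
		assert(reverse_bitmap8(v) == my_bitrev8((unsigned char)v));
	return 0;
}
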
*/ tempval = MACCFG2_INIT_SETTINGS; - if (gfar_has_errata(priv, GFAR_ERRATA_74)) + + /* If the mtu is larger than the max size for standard + * ethernet frames (ie, a jumbo frame), then set maccfg2 + * to allow huge frames, and to check the length + */ + if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE || + gfar_has_errata(priv, GFAR_ERRATA_74)) tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK; + gfar_write(&regs->maccfg2, tempval); + /* Clear mac addr hash registers */ + gfar_write(&regs->igaddr0, 0); + gfar_write(&regs->igaddr1, 0); + gfar_write(&regs->igaddr2, 0); + gfar_write(&regs->igaddr3, 0); + gfar_write(&regs->igaddr4, 0); + gfar_write(&regs->igaddr5, 0); + gfar_write(&regs->igaddr6, 0); + gfar_write(&regs->igaddr7, 0); + + gfar_write(&regs->gaddr0, 0); + gfar_write(&regs->gaddr1, 0); + gfar_write(&regs->gaddr2, 0); + gfar_write(&regs->gaddr3, 0); + gfar_write(&regs->gaddr4, 0); + gfar_write(&regs->gaddr5, 0); + gfar_write(&regs->gaddr6, 0); + gfar_write(&regs->gaddr7, 0); + + if (priv->extended_hash) + gfar_clear_exact_match(priv->ndev); + + gfar_mac_rx_config(priv); + + gfar_mac_tx_config(priv); + + gfar_set_mac_address(priv->ndev); + + gfar_set_multi(priv->ndev); + + /* clear ievent and imask before configuring coalescing */ + gfar_ints_disable(priv); + + /* Configure the coalescing support */ + gfar_configure_coalescing_all(priv); +} + +static void gfar_hw_init(struct gfar_private *priv) +{ + struct gfar __iomem *regs = priv->gfargrp[0].regs; + u32 attrs; + + /* Stop the DMA engine now, in case it was running before + * (The firmware could have used it, and left it running). + */ + gfar_halt(priv); + + gfar_mac_reset(priv); + + /* Zero out the rmon mib registers if it has them */ + if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) { + memset_io(&(regs->rmon), 0, sizeof(struct rmon_mib)); + + /* Mask off the CAM interrupts */ + gfar_write(&regs->rmon.cam1, 0xffffffff); + gfar_write(&regs->rmon.cam2, 0xffffffff); + } + /* Initialize ECNTRL */ gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS); - /* Set the dev->base_addr to the gfar reg region */ - dev->base_addr = (unsigned long) regs; + /* Set the extraction length and index */ + attrs = ATTRELI_EL(priv->rx_stash_size) | + ATTRELI_EI(priv->rx_stash_index); - /* Fill in the dev structure */ - dev->watchdog_timeo = TX_TIMEOUT; - dev->mtu = 1500; - dev->netdev_ops = &gfar_netdev_ops; - dev->ethtool_ops = &gfar_ethtool_ops; + gfar_write(&regs->attreli, attrs); - /* Register for napi ...We are registering NAPI for each grp */ - if (priv->mode == SQ_SG_MODE) - netif_napi_add(dev, &priv->gfargrp[0].napi, gfar_poll_sq, - GFAR_DEV_WEIGHT); - else - for (i = 0; i < priv->num_grps; i++) - netif_napi_add(dev, &priv->gfargrp[i].napi, gfar_poll, - GFAR_DEV_WEIGHT); + /* Start with defaults, and add stashing + * depending on driver parameters + */ + attrs = ATTR_INIT_SETTINGS; - if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) { - dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | - NETIF_F_RXCSUM; - dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | - NETIF_F_RXCSUM | NETIF_F_HIGHDMA; - } + if (priv->bd_stash_en) + attrs |= ATTR_BDSTASH; - if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) { - dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | - NETIF_F_HW_VLAN_CTAG_RX; - dev->features |= NETIF_F_HW_VLAN_CTAG_RX; - } + if (priv->rx_stash_size != 0) + attrs |= ATTR_BUFSTASH; + + gfar_write(&regs->attr, attrs); + + /* FIFO configs */ + gfar_write(&regs->fifo_tx_thr, DEFAULT_FIFO_TX_THR); + gfar_write(&regs->fifo_tx_starve, DEFAULT_FIFO_TX_STARVE); + gfar_write(&regs->fifo_tx_starve_shutoff, DEFAULT_FIFO_TX_STARVE_OFF); + 
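For readers tracking the refactor: gfar_mac_reset() now owns the MAC soft-reset sequence that gfar_probe() used to open-code. A minimal sketch of that sequence, distilled from the hunks above (an illustration, not driver code; gfar_write() is the driver's own register accessor, and the two udelay(3) calls cover the "at least 3 TX clocks" requirement noted in the comments):

static void mac_soft_reset_sketch(struct gfar __iomem *regs)
{
	/* Assert the MACCFG1 soft-reset bit */
	gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET);
	udelay(3);	/* wait at least 3 TX clocks */

	/* The bit is not self-resetting; clear it explicitly
	 * before resuming normal operation
	 */
	gfar_write(&regs->maccfg1, 0);
	udelay(3);
}
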
+ /* Program the interrupt steering regs, only for MG devices */ + if (priv->num_grps > 1) + gfar_write_isrg(priv); +} + +static void __init gfar_init_addr_hash_table(struct gfar_private *priv) +{ + struct gfar __iomem *regs = priv->gfargrp[0].regs; if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) { priv->extended_hash = 1; @@ -1133,68 +1268,81 @@ static int gfar_probe(struct platform_device *ofdev) priv->hash_regs[6] = &regs->gaddr6; priv->hash_regs[7] = &regs->gaddr7; } +} - if (priv->device_flags & FSL_GIANFAR_DEV_HAS_PADDING) - priv->padding = DEFAULT_PADDING; - else - priv->padding = 0; +/* Set up the ethernet device structure, private data, + * and anything else we need before we start + */ +static int gfar_probe(struct platform_device *ofdev) +{ + struct net_device *dev = NULL; + struct gfar_private *priv = NULL; + int err = 0, i; - if (dev->features & NETIF_F_IP_CSUM || - priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) - dev->needed_headroom = GMAC_FCB_LEN; + err = gfar_of_init(ofdev, &dev); - /* Program the isrg regs only if number of grps > 1 */ - if (priv->num_grps > 1) { - baddr = &regs->isrg0; - for (i = 0; i < priv->num_grps; i++) { - isrg |= (priv->gfargrp[i].rx_bit_map << ISRG_SHIFT_RX); - isrg |= (priv->gfargrp[i].tx_bit_map << ISRG_SHIFT_TX); - gfar_write(baddr, isrg); - baddr++; - isrg = 0x0; + if (err) + return err; + + priv = netdev_priv(dev); + priv->ndev = dev; + priv->ofdev = ofdev; + priv->dev = &ofdev->dev; + SET_NETDEV_DEV(dev, &ofdev->dev); + + spin_lock_init(&priv->bflock); + INIT_WORK(&priv->reset_task, gfar_reset_task); + + platform_set_drvdata(ofdev, priv); + + gfar_detect_errata(priv); + + /* Set the dev->base_addr to the gfar reg region */ + dev->base_addr = (unsigned long) priv->gfargrp[0].regs; + + /* Fill in the dev structure */ + dev->watchdog_timeo = TX_TIMEOUT; + dev->mtu = 1500; + dev->netdev_ops = &gfar_netdev_ops; + dev->ethtool_ops = &gfar_ethtool_ops; + + /* Register NAPI for each interrupt group */ + for (i = 0; i < priv->num_grps; i++) { + if (priv->poll_mode == GFAR_SQ_POLLING) { + netif_napi_add(dev, &priv->gfargrp[i].napi_rx, + gfar_poll_rx_sq, GFAR_DEV_WEIGHT); + netif_napi_add(dev, &priv->gfargrp[i].napi_tx, + gfar_poll_tx_sq, 2); + } else { + netif_napi_add(dev, &priv->gfargrp[i].napi_rx, + gfar_poll_rx, GFAR_DEV_WEIGHT); + netif_napi_add(dev, &priv->gfargrp[i].napi_tx, + gfar_poll_tx, 2); } } - /* Need to reverse the bit maps as bit_map's MSB is q0 - * but, for_each_set_bit parses from right to left, which - * basically reverses the queue numbers - */ - for (i = 0; i< priv->num_grps; i++) { - priv->gfargrp[i].tx_bit_map = - reverse_bitmap(priv->gfargrp[i].tx_bit_map, MAX_TX_QS); - priv->gfargrp[i].rx_bit_map = - reverse_bitmap(priv->gfargrp[i].rx_bit_map, MAX_RX_QS); + if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) { + dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | + NETIF_F_RXCSUM; + dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | + NETIF_F_RXCSUM | NETIF_F_HIGHDMA; } - /* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values, - * also assign queues to groups - */ - for (grp_idx = 0; grp_idx < priv->num_grps; grp_idx++) { - priv->gfargrp[grp_idx].num_rx_queues = 0x0; - - for_each_set_bit(i, &priv->gfargrp[grp_idx].rx_bit_map, - priv->num_rx_queues) { - priv->gfargrp[grp_idx].num_rx_queues++; - priv->rx_queue[i]->grp = &priv->gfargrp[grp_idx]; - rstat = rstat | (RSTAT_CLEAR_RHALT >> i); - rqueue = rqueue | ((RQUEUE_EN0 | RQUEUE_EX0) >> i); - } - priv->gfargrp[grp_idx].num_tx_queues = 0x0; - - for_each_set_bit(i, 
&priv->gfargrp[grp_idx].tx_bit_map, - priv->num_tx_queues) { - priv->gfargrp[grp_idx].num_tx_queues++; - priv->tx_queue[i]->grp = &priv->gfargrp[grp_idx]; - tstat = tstat | (TSTAT_CLEAR_THALT >> i); - tqueue = tqueue | (TQUEUE_EN0 >> i); - } - priv->gfargrp[grp_idx].rstat = rstat; - priv->gfargrp[grp_idx].tstat = tstat; - rstat = tstat =0; + if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) { + dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX; + dev->features |= NETIF_F_HW_VLAN_CTAG_RX; } - gfar_write(&regs->rqueue, rqueue); - gfar_write(&regs->tqueue, tqueue); + gfar_init_addr_hash_table(priv); + + /* Insert receive time stamps into padding alignment bytes */ + if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) + priv->padding = 8; + + if (dev->features & NETIF_F_IP_CSUM || + priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) + dev->needed_headroom = GMAC_FCB_LEN; priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE; @@ -1220,8 +1368,9 @@ static int gfar_probe(struct platform_device *ofdev) if (priv->num_tx_queues == 1) priv->prio_sched_en = 1; - /* Carrier starts down, phylib will bring it up */ - netif_carrier_off(dev); + set_bit(GFAR_DOWN, &priv->state); + + gfar_hw_init(priv); err = register_netdev(dev); if (err) { goto register_fail; } + /* Carrier starts down, phylib will bring it up */ + netif_carrier_off(dev); + device_init_wakeup(&dev->dev, priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET); @@ -1251,9 +1403,6 @@ static int gfar_probe(struct platform_device *ofdev) /* Initialize the filer table */ gfar_init_filer_table(priv); - /* Create all the sysfs files */ - gfar_init_sysfs(dev); - /* Print out the device info */ netdev_info(dev, "mac: %pM\n", dev->dev_addr); @@ -1272,8 +1421,8 @@ static int gfar_probe(struct platform_device *ofdev) register_fail: unmap_group_regs(priv); - free_tx_pointers(priv); - free_rx_pointers(priv); + gfar_free_rx_queues(priv); + gfar_free_tx_queues(priv); if (priv->phy_node) of_node_put(priv->phy_node); if (priv->tbi_node) @@ -1293,6 +1442,8 @@ static int gfar_remove(struct platform_device *ofdev) unregister_netdev(priv->ndev); unmap_group_regs(priv); + gfar_free_rx_queues(priv); + gfar_free_tx_queues(priv); free_gfar_dev(priv); return 0; @@ -1318,9 +1469,8 @@ static int gfar_suspend(struct device *dev) local_irq_save(flags); lock_tx_qs(priv); - lock_rx_qs(priv); - gfar_halt_nodisable(ndev); + gfar_halt_nodisable(priv); /* Disable Tx, and Rx if wake-on-LAN is disabled. 
*/ tempval = gfar_read(&regs->maccfg1); @@ -1332,7 +1482,6 @@ static int gfar_suspend(struct device *dev) gfar_write(&regs->maccfg1, tempval); - unlock_rx_qs(priv); unlock_tx_qs(priv); local_irq_restore(flags); @@ -1378,15 +1527,13 @@ static int gfar_resume(struct device *dev) */ local_irq_save(flags); lock_tx_qs(priv); - lock_rx_qs(priv); tempval = gfar_read(&regs->maccfg2); tempval &= ~MACCFG2_MPEN; gfar_write(&regs->maccfg2, tempval); - gfar_start(ndev); + gfar_start(priv); - unlock_rx_qs(priv); unlock_tx_qs(priv); local_irq_restore(flags); @@ -1413,10 +1560,11 @@ static int gfar_restore(struct device *dev) return -ENOMEM; } - init_registers(ndev); - gfar_set_mac_address(ndev); - gfar_init_mac(ndev); - gfar_start(ndev); + gfar_mac_reset(priv); + + gfar_init_tx_rx_base(priv); + + gfar_start(priv); priv->oldlink = 0; priv->oldspeed = 0; @@ -1574,57 +1722,6 @@ static void gfar_configure_serdes(struct net_device *dev) BMCR_SPEED1000); } -static void init_registers(struct net_device *dev) -{ - struct gfar_private *priv = netdev_priv(dev); - struct gfar __iomem *regs = NULL; - int i; - - for (i = 0; i < priv->num_grps; i++) { - regs = priv->gfargrp[i].regs; - /* Clear IEVENT */ - gfar_write(&regs->ievent, IEVENT_INIT_CLEAR); - - /* Initialize IMASK */ - gfar_write(&regs->imask, IMASK_INIT_CLEAR); - } - - regs = priv->gfargrp[0].regs; - /* Init hash registers to zero */ - gfar_write(&regs->igaddr0, 0); - gfar_write(&regs->igaddr1, 0); - gfar_write(&regs->igaddr2, 0); - gfar_write(&regs->igaddr3, 0); - gfar_write(&regs->igaddr4, 0); - gfar_write(&regs->igaddr5, 0); - gfar_write(&regs->igaddr6, 0); - gfar_write(&regs->igaddr7, 0); - - gfar_write(&regs->gaddr0, 0); - gfar_write(&regs->gaddr1, 0); - gfar_write(&regs->gaddr2, 0); - gfar_write(&regs->gaddr3, 0); - gfar_write(&regs->gaddr4, 0); - gfar_write(&regs->gaddr5, 0); - gfar_write(&regs->gaddr6, 0); - gfar_write(&regs->gaddr7, 0); - - /* Zero out the rmon mib registers if it has them */ - if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) { - memset_io(&(regs->rmon), 0, sizeof (struct rmon_mib)); - - /* Mask off the CAM interrupts */ - gfar_write(&regs->rmon.cam1, 0xffffffff); - gfar_write(&regs->rmon.cam2, 0xffffffff); - } - - /* Initialize the max receive buffer length */ - gfar_write(&regs->mrblr, priv->rx_buffer_size); - - /* Initialize the Minimum Frame Length Register */ - gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS); -} - static int __gfar_is_rx_idle(struct gfar_private *priv) { u32 res; @@ -1648,23 +1745,13 @@ static int __gfar_is_rx_idle(struct gfar_private *priv) } /* Halt the receive and transmit queues */ -static void gfar_halt_nodisable(struct net_device *dev) +static void gfar_halt_nodisable(struct gfar_private *priv) { - struct gfar_private *priv = netdev_priv(dev); - struct gfar __iomem *regs = NULL; + struct gfar __iomem *regs = priv->gfargrp[0].regs; u32 tempval; - int i; - - for (i = 0; i < priv->num_grps; i++) { - regs = priv->gfargrp[i].regs; - /* Mask all interrupts */ - gfar_write(&regs->imask, IMASK_INIT_CLEAR); - /* Clear all interrupts */ - gfar_write(&regs->ievent, IEVENT_INIT_CLEAR); - } + gfar_ints_disable(priv); - regs = priv->gfargrp[0].regs; /* Stop the DMA, and wait for it to stop */ tempval = gfar_read(&regs->dmactrl); if ((tempval & (DMACTRL_GRS | DMACTRL_GTS)) != @@ -1685,56 +1772,41 @@ static void gfar_halt_nodisable(struct net_device *dev) } /* Halt the receive and transmit queues */ -void gfar_halt(struct net_device *dev) +void gfar_halt(struct gfar_private *priv) { - struct gfar_private *priv = netdev_priv(dev); struct gfar __iomem *regs = priv->gfargrp[0].regs; u32 tempval; - 
gfar_halt_nodisable(dev); + /* Disable the Rx/Tx hw queues */ + gfar_write(&regs->rqueue, 0); + gfar_write(&regs->tqueue, 0); - /* Disable Rx and Tx */ + mdelay(10); + + gfar_halt_nodisable(priv); + + /* Disable Rx/Tx DMA */ tempval = gfar_read(&regs->maccfg1); tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN); gfar_write(&regs->maccfg1, tempval); } -static void free_grp_irqs(struct gfar_priv_grp *grp) -{ - free_irq(gfar_irq(grp, TX)->irq, grp); - free_irq(gfar_irq(grp, RX)->irq, grp); - free_irq(gfar_irq(grp, ER)->irq, grp); -} - void stop_gfar(struct net_device *dev) { struct gfar_private *priv = netdev_priv(dev); - unsigned long flags; - int i; - - phy_stop(priv->phydev); + netif_tx_stop_all_queues(dev); - /* Lock it down */ - local_irq_save(flags); - lock_tx_qs(priv); - lock_rx_qs(priv); + smp_mb__before_clear_bit(); + set_bit(GFAR_DOWN, &priv->state); + smp_mb__after_clear_bit(); - gfar_halt(dev); + disable_napi(priv); - unlock_rx_qs(priv); - unlock_tx_qs(priv); - local_irq_restore(flags); + /* disable ints and gracefully shut down Rx/Tx DMA */ + gfar_halt(priv); - /* Free the IRQs */ - if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { - for (i = 0; i < priv->num_grps; i++) - free_grp_irqs(&priv->gfargrp[i]); - } else { - for (i = 0; i < priv->num_grps; i++) - free_irq(gfar_irq(&priv->gfargrp[i], TX)->irq, - &priv->gfargrp[i]); - } + phy_stop(priv->phydev); free_skb_resources(priv); } @@ -1825,17 +1897,15 @@ static void free_skb_resources(struct gfar_private *priv) priv->tx_queue[0]->tx_bd_dma_base); } -void gfar_start(struct net_device *dev) +void gfar_start(struct gfar_private *priv) { - struct gfar_private *priv = netdev_priv(dev); struct gfar __iomem *regs = priv->gfargrp[0].regs; u32 tempval; int i = 0; - /* Enable Rx and Tx in MACCFG1 */ - tempval = gfar_read(&regs->maccfg1); - tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN); - gfar_write(&regs->maccfg1, tempval); + /* Enable Rx/Tx hw queues */ + gfar_write(&regs->rqueue, priv->rqueue); + gfar_write(&regs->tqueue, priv->tqueue); /* Initialize DMACTRL to have WWR and WOP */ tempval = gfar_read(&regs->dmactrl); @@ -1852,52 +1922,23 @@ void gfar_start(struct gfar_private *priv) /* Clear THLT/RHLT, so that the DMA starts polling now */ gfar_write(&regs->tstat, priv->gfargrp[i].tstat); gfar_write(&regs->rstat, priv->gfargrp[i].rstat); - /* Unmask the interrupts we look for */ - gfar_write(&regs->imask, IMASK_DEFAULT); } - dev->trans_start = jiffies; /* prevent tx timeout */ -} - -static void gfar_configure_coalescing(struct gfar_private *priv, - unsigned long tx_mask, unsigned long rx_mask) -{ - struct gfar __iomem *regs = priv->gfargrp[0].regs; - u32 __iomem *baddr; - - if (priv->mode == MQ_MG_MODE) { - int i = 0; - - baddr = &regs->txic0; - for_each_set_bit(i, &tx_mask, priv->num_tx_queues) { - gfar_write(baddr + i, 0); - if (likely(priv->tx_queue[i]->txcoalescing)) - gfar_write(baddr + i, priv->tx_queue[i]->txic); - } + /* Enable Rx/Tx DMA */ + tempval = gfar_read(&regs->maccfg1); + tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN); + gfar_write(&regs->maccfg1, tempval); - baddr = &regs->rxic0; - for_each_set_bit(i, &rx_mask, priv->num_rx_queues) { - gfar_write(baddr + i, 0); - if (likely(priv->rx_queue[i]->rxcoalescing)) - gfar_write(baddr + i, priv->rx_queue[i]->rxic); - } - } else { - /* Backward compatible case -- even if we enable - * multiple queues, there's only single reg to program - */ - gfar_write(&regs->txic, 0); - if (likely(priv->tx_queue[0]->txcoalescing)) - gfar_write(&regs->txic, priv->tx_queue[0]->txic); + gfar_ints_enable(priv); - gfar_write(&regs->rxic, 0); - if 
(unlikely(priv->rx_queue[0]->rxcoalescing)) - gfar_write(&regs->rxic, priv->rx_queue[0]->rxic); - } + priv->ndev->trans_start = jiffies; /* prevent tx timeout */ } -void gfar_configure_coalescing_all(struct gfar_private *priv) +static void free_grp_irqs(struct gfar_priv_grp *grp) { - gfar_configure_coalescing(priv, 0xFF, 0xFF); + free_irq(gfar_irq(grp, TX)->irq, grp); + free_irq(gfar_irq(grp, RX)->irq, grp); + free_irq(gfar_irq(grp, ER)->irq, grp); } static int register_grp_irqs(struct gfar_priv_grp *grp) @@ -1956,46 +1997,65 @@ err_irq_fail: } -/* Bring the controller up and running */ -int startup_gfar(struct net_device *ndev) +static void gfar_free_irq(struct gfar_private *priv) { - struct gfar_private *priv = netdev_priv(ndev); - struct gfar __iomem *regs = NULL; - int err, i, j; + int i; - for (i = 0; i < priv->num_grps; i++) { - regs= priv->gfargrp[i].regs; - gfar_write(&regs->imask, IMASK_INIT_CLEAR); + /* Free the IRQs */ + if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { + for (i = 0; i < priv->num_grps; i++) + free_grp_irqs(&priv->gfargrp[i]); + } else { + for (i = 0; i < priv->num_grps; i++) + free_irq(gfar_irq(&priv->gfargrp[i], TX)->irq, + &priv->gfargrp[i]); } +} - regs= priv->gfargrp[0].regs; - err = gfar_alloc_skb_resources(ndev); - if (err) - return err; - - gfar_init_mac(ndev); +static int gfar_request_irq(struct gfar_private *priv) +{ + int err, i, j; for (i = 0; i < priv->num_grps; i++) { err = register_grp_irqs(&priv->gfargrp[i]); if (err) { for (j = 0; j < i; j++) free_grp_irqs(&priv->gfargrp[j]); - goto irq_fail; + return err; } } - /* Start the controller */ - gfar_start(ndev); + return 0; +} + +/* Bring the controller up and running */ +int startup_gfar(struct net_device *ndev) +{ + struct gfar_private *priv = netdev_priv(ndev); + int err; + + gfar_mac_reset(priv); + + err = gfar_alloc_skb_resources(ndev); + if (err) + return err; + + gfar_init_tx_rx_base(priv); + + smp_mb__before_clear_bit(); + clear_bit(GFAR_DOWN, &priv->state); + smp_mb__after_clear_bit(); + + /* Start Rx/Tx DMA and enable the interrupts */ + gfar_start(priv); phy_start(priv->phydev); - gfar_configure_coalescing_all(priv); + enable_napi(priv); - return 0; + netif_tx_wake_all_queues(ndev); -irq_fail: - free_skb_resources(priv); - return err; + return 0; } /* Called when something needs to use the ethernet device @@ -2006,27 +2066,17 @@ static int gfar_enet_open(struct net_device *dev) struct gfar_private *priv = netdev_priv(dev); int err; - enable_napi(priv); - - /* Initialize a bunch of registers */ - init_registers(dev); - - gfar_set_mac_address(dev); - err = init_phy(dev); + if (err) + return err; - if (err) { - disable_napi(priv); + err = gfar_request_irq(priv); + if (err) return err; - } err = startup_gfar(dev); - if (err) { - disable_napi(priv); + if (err) return err; - } - - netif_tx_start_all_queues(dev); device_set_wakeup_enable(&dev->dev, priv->wol_en); @@ -2152,13 +2202,13 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) skb_new = skb_realloc_headroom(skb, fcb_len); if (!skb_new) { dev->stats.tx_errors++; - kfree_skb(skb); + dev_kfree_skb_any(skb); return NETDEV_TX_OK; } if (skb->sk) skb_set_owner_w(skb_new, skb->sk); - consume_skb(skb); + dev_consume_skb_any(skb); skb = skb_new; } @@ -2351,8 +2401,6 @@ static int gfar_close(struct net_device *dev) { struct gfar_private *priv = netdev_priv(dev); - disable_napi(priv); - cancel_work_sync(&priv->reset_task); stop_gfar(dev); @@ -2360,7 +2408,7 @@ static int gfar_close(struct net_device *dev) 
phy_disconnect(priv->phydev); priv->phydev = NULL; - netif_tx_stop_all_queues(dev); + gfar_free_irq(priv); return 0; } @@ -2373,77 +2421,9 @@ static int gfar_set_mac_address(struct net_device *dev) return 0; } -/* Check if rx parser should be activated */ -void gfar_check_rx_parser_mode(struct gfar_private *priv) -{ - struct gfar __iomem *regs; - u32 tempval; - - regs = priv->gfargrp[0].regs; - - tempval = gfar_read(&regs->rctrl); - /* If parse is no longer required, then disable parser */ - if (tempval & RCTRL_REQ_PARSER) { - tempval |= RCTRL_PRSDEP_INIT; - priv->uses_rxfcb = 1; - } else { - tempval &= ~RCTRL_PRSDEP_INIT; - priv->uses_rxfcb = 0; - } - gfar_write(&regs->rctrl, tempval); -} - -/* Enables and disables VLAN insertion/extraction */ -void gfar_vlan_mode(struct net_device *dev, netdev_features_t features) -{ - struct gfar_private *priv = netdev_priv(dev); - struct gfar __iomem *regs = NULL; - unsigned long flags; - u32 tempval; - - regs = priv->gfargrp[0].regs; - local_irq_save(flags); - lock_rx_qs(priv); - - if (features & NETIF_F_HW_VLAN_CTAG_TX) { - /* Enable VLAN tag insertion */ - tempval = gfar_read(&regs->tctrl); - tempval |= TCTRL_VLINS; - gfar_write(&regs->tctrl, tempval); - } else { - /* Disable VLAN tag insertion */ - tempval = gfar_read(&regs->tctrl); - tempval &= ~TCTRL_VLINS; - gfar_write(&regs->tctrl, tempval); - } - - if (features & NETIF_F_HW_VLAN_CTAG_RX) { - /* Enable VLAN tag extraction */ - tempval = gfar_read(&regs->rctrl); - tempval |= (RCTRL_VLEX | RCTRL_PRSDEP_INIT); - gfar_write(&regs->rctrl, tempval); - priv->uses_rxfcb = 1; - } else { - /* Disable VLAN tag extraction */ - tempval = gfar_read(&regs->rctrl); - tempval &= ~RCTRL_VLEX; - gfar_write(&regs->rctrl, tempval); - - gfar_check_rx_parser_mode(priv); - } - - gfar_change_mtu(dev, dev->mtu); - - unlock_rx_qs(priv); - local_irq_restore(flags); -} - static int gfar_change_mtu(struct net_device *dev, int new_mtu) { - int tempsize, tempval; struct gfar_private *priv = netdev_priv(dev); - struct gfar __iomem *regs = priv->gfargrp[0].regs; - int oldsize = priv->rx_buffer_size; int frame_size = new_mtu + ETH_HLEN; if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) { @@ -2451,45 +2431,33 @@ static int gfar_change_mtu(struct net_device *dev, int new_mtu) return -EINVAL; } - if (priv->uses_rxfcb) - frame_size += GMAC_FCB_LEN; + while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state)) + cpu_relax(); - frame_size += priv->padding; - - tempsize = (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) + - INCREMENTAL_BUFFER_SIZE; - - /* Only stop and start the controller if it isn't already - * stopped, and we changed something - */ - if ((oldsize != tempsize) && (dev->flags & IFF_UP)) + if (dev->flags & IFF_UP) stop_gfar(dev); - priv->rx_buffer_size = tempsize; - dev->mtu = new_mtu; - gfar_write(&regs->mrblr, priv->rx_buffer_size); - gfar_write(&regs->maxfrm, priv->rx_buffer_size); + if (dev->flags & IFF_UP) + startup_gfar(dev); - /* If the mtu is larger than the max size for standard - * ethernet frames (ie, a jumbo frame), then set maccfg2 - * to allow huge frames, and to check the length - */ - tempval = gfar_read(&regs->maccfg2); + clear_bit_unlock(GFAR_RESETTING, &priv->state); - if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE || - gfar_has_errata(priv, GFAR_ERRATA_74)) - tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK); - else - tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK); + return 0; +} - gfar_write(&regs->maccfg2, tempval); +void reset_gfar(struct net_device *ndev) +{ + struct gfar_private *priv = netdev_priv(ndev); - if 
((oldsize != tempsize) && (dev->flags & IFF_UP)) - startup_gfar(dev); + while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state)) + cpu_relax(); - return 0; + stop_gfar(ndev); + startup_gfar(ndev); + + clear_bit_unlock(GFAR_RESETTING, &priv->state); } /* gfar_reset_task gets scheduled when a packet has not been @@ -2501,16 +2469,7 @@ static void gfar_reset_task(struct work_struct *work) { struct gfar_private *priv = container_of(work, struct gfar_private, reset_task); - struct net_device *dev = priv->ndev; - - if (dev->flags & IFF_UP) { - netif_tx_stop_all_queues(dev); - stop_gfar(dev); - startup_gfar(dev); - netif_tx_start_all_queues(dev); - } - - netif_tx_schedule_all(dev); + reset_gfar(priv->ndev); } static void gfar_timeout(struct net_device *dev) @@ -2623,8 +2582,10 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue) } /* If we freed a buffer, we can restart transmission, if necessary */ - if (netif_tx_queue_stopped(txq) && tx_queue->num_txbdfree) - netif_wake_subqueue(dev, tqi); + if (tx_queue->num_txbdfree && + netif_tx_queue_stopped(txq) && + !(test_bit(GFAR_DOWN, &priv->state))) + netif_wake_subqueue(priv->ndev, tqi); /* Update dirty indicators */ tx_queue->skb_dirtytx = skb_dirtytx; @@ -2633,31 +2594,6 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue) netdev_tx_completed_queue(txq, howmany, bytes_sent); } -static void gfar_schedule_cleanup(struct gfar_priv_grp *gfargrp) -{ - unsigned long flags; - - spin_lock_irqsave(&gfargrp->grplock, flags); - if (napi_schedule_prep(&gfargrp->napi)) { - gfar_write(&gfargrp->regs->imask, IMASK_RTX_DISABLED); - __napi_schedule(&gfargrp->napi); - } else { - /* Clear IEVENT, so interrupts aren't called again - * because of the packets that have already arrived. - */ - gfar_write(&gfargrp->regs->ievent, IEVENT_RTX_MASK); - } - spin_unlock_irqrestore(&gfargrp->grplock, flags); - -} - -/* Interrupt Handler for Transmit complete */ -static irqreturn_t gfar_transmit(int irq, void *grp_id) -{ - gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id); - return IRQ_HANDLED; -} - static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp, struct sk_buff *skb) { @@ -2728,7 +2664,48 @@ static inline void count_errors(unsigned short status, struct net_device *dev) irqreturn_t gfar_receive(int irq, void *grp_id) { - gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id); + struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id; + unsigned long flags; + u32 imask; + + if (likely(napi_schedule_prep(&grp->napi_rx))) { + spin_lock_irqsave(&grp->grplock, flags); + imask = gfar_read(&grp->regs->imask); + imask &= IMASK_RX_DISABLED; + gfar_write(&grp->regs->imask, imask); + spin_unlock_irqrestore(&grp->grplock, flags); + __napi_schedule(&grp->napi_rx); + } else { + /* Clear IEVENT, so interrupts aren't called again + * because of the packets that have already arrived. 
+ */ + gfar_write(&grp->regs->ievent, IEVENT_RX_MASK); + } + + return IRQ_HANDLED; +} + +/* Interrupt Handler for Transmit complete */ +static irqreturn_t gfar_transmit(int irq, void *grp_id) +{ + struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id; + unsigned long flags; + u32 imask; + + if (likely(napi_schedule_prep(&grp->napi_tx))) { + spin_lock_irqsave(&grp->grplock, flags); + imask = gfar_read(&grp->regs->imask); + imask &= IMASK_TX_DISABLED; + gfar_write(&grp->regs->imask, imask); + spin_unlock_irqrestore(&grp->grplock, flags); + __napi_schedule(&grp->napi_tx); + } else { + /* Clear IEVENT, so interrupts aren't called again + * because of the packets that have already arrived. + */ + gfar_write(&grp->regs->ievent, IEVENT_TX_MASK); + } + return IRQ_HANDLED; } @@ -2852,7 +2829,7 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit) rx_queue->stats.rx_bytes += pkt_len; skb_record_rx_queue(skb, rx_queue->qindex); gfar_process_frame(dev, skb, amount_pull, - &rx_queue->grp->napi); + &rx_queue->grp->napi_rx); } else { netif_warn(priv, rx_err, dev, "Missing skb!\n"); @@ -2881,66 +2858,81 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit) return howmany; } -static int gfar_poll_sq(struct napi_struct *napi, int budget) +static int gfar_poll_rx_sq(struct napi_struct *napi, int budget) { struct gfar_priv_grp *gfargrp = - container_of(napi, struct gfar_priv_grp, napi); + container_of(napi, struct gfar_priv_grp, napi_rx); struct gfar __iomem *regs = gfargrp->regs; - struct gfar_priv_tx_q *tx_queue = gfargrp->priv->tx_queue[0]; - struct gfar_priv_rx_q *rx_queue = gfargrp->priv->rx_queue[0]; + struct gfar_priv_rx_q *rx_queue = gfargrp->rx_queue; int work_done = 0; /* Clear IEVENT, so interrupts aren't called again * because of the packets that have already arrived */ - gfar_write(&regs->ievent, IEVENT_RTX_MASK); - - /* run Tx cleanup to completion */ - if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) - gfar_clean_tx_ring(tx_queue); + gfar_write(&regs->ievent, IEVENT_RX_MASK); work_done = gfar_clean_rx_ring(rx_queue, budget); if (work_done < budget) { + u32 imask; napi_complete(napi); /* Clear the halt bit in RSTAT */ gfar_write(&regs->rstat, gfargrp->rstat); - gfar_write(&regs->imask, IMASK_DEFAULT); - - /* If we are coalescing interrupts, update the timer - * Otherwise, clear it - */ - gfar_write(&regs->txic, 0); - if (likely(tx_queue->txcoalescing)) - gfar_write(&regs->txic, tx_queue->txic); - - gfar_write(&regs->rxic, 0); - if (unlikely(rx_queue->rxcoalescing)) - gfar_write(&regs->rxic, rx_queue->rxic); + spin_lock_irq(&gfargrp->grplock); + imask = gfar_read(&regs->imask); + imask |= IMASK_RX_DEFAULT; + gfar_write(&regs->imask, imask); + spin_unlock_irq(&gfargrp->grplock); } return work_done; } -static int gfar_poll(struct napi_struct *napi, int budget) +static int gfar_poll_tx_sq(struct napi_struct *napi, int budget) +{ + struct gfar_priv_grp *gfargrp = + container_of(napi, struct gfar_priv_grp, napi_tx); + struct gfar __iomem *regs = gfargrp->regs; + struct gfar_priv_tx_q *tx_queue = gfargrp->tx_queue; + u32 imask; + + /* Clear IEVENT, so interrupts aren't called again + * because of the packets that have already arrived + */ + gfar_write(&regs->ievent, IEVENT_TX_MASK); + + /* run Tx cleanup to completion */ + if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) + gfar_clean_tx_ring(tx_queue); + + napi_complete(napi); + + spin_lock_irq(&gfargrp->grplock); + imask = gfar_read(&regs->imask); + imask |= IMASK_TX_DEFAULT; + gfar_write(&regs->imask, imask); + 
spin_unlock_irq(&gfargrp->grplock); + + return 0; +} + +static int gfar_poll_rx(struct napi_struct *napi, int budget) { struct gfar_priv_grp *gfargrp = - container_of(napi, struct gfar_priv_grp, napi); + container_of(napi, struct gfar_priv_grp, napi_rx); struct gfar_private *priv = gfargrp->priv; struct gfar __iomem *regs = gfargrp->regs; - struct gfar_priv_tx_q *tx_queue = NULL; struct gfar_priv_rx_q *rx_queue = NULL; int work_done = 0, work_done_per_q = 0; int i, budget_per_q = 0; - int has_tx_work = 0; unsigned long rstat_rxf; int num_act_queues; /* Clear IEVENT, so interrupts aren't called again * because of the packets that have already arrived */ - gfar_write(&regs->ievent, IEVENT_RTX_MASK); + gfar_write(&regs->ievent, IEVENT_RX_MASK); rstat_rxf = gfar_read(&regs->rstat) & RSTAT_RXF_MASK; @@ -2948,15 +2940,6 @@ static int gfar_poll(struct napi_struct *napi, int budget) if (num_act_queues) budget_per_q = budget/num_act_queues; - for_each_set_bit(i, &gfargrp->tx_bit_map, priv->num_tx_queues) { - tx_queue = priv->tx_queue[i]; - /* run Tx cleanup to completion */ - if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) { - gfar_clean_tx_ring(tx_queue); - has_tx_work = 1; - } - } - for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) { /* skip queue if not active */ if (!(rstat_rxf & (RSTAT_CLEAR_RXF0 >> i))) @@ -2979,25 +2962,62 @@ static int gfar_poll(struct napi_struct *napi, int budget) } } - if (!num_act_queues && !has_tx_work) { - + if (!num_act_queues) { + u32 imask; napi_complete(napi); /* Clear the halt bit in RSTAT */ gfar_write(&regs->rstat, gfargrp->rstat); - gfar_write(&regs->imask, IMASK_DEFAULT); - - /* If we are coalescing interrupts, update the timer - * Otherwise, clear it - */ - gfar_configure_coalescing(priv, gfargrp->rx_bit_map, - gfargrp->tx_bit_map); + spin_lock_irq(&gfargrp->grplock); + imask = gfar_read(&regs->imask); + imask |= IMASK_RX_DEFAULT; + gfar_write(&regs->imask, imask); + spin_unlock_irq(&gfargrp->grplock); } return work_done; } +static int gfar_poll_tx(struct napi_struct *napi, int budget) +{ + struct gfar_priv_grp *gfargrp = + container_of(napi, struct gfar_priv_grp, napi_tx); + struct gfar_private *priv = gfargrp->priv; + struct gfar __iomem *regs = gfargrp->regs; + struct gfar_priv_tx_q *tx_queue = NULL; + int has_tx_work = 0; + int i; + + /* Clear IEVENT, so interrupts aren't called again + * because of the packets that have already arrived + */ + gfar_write(&regs->ievent, IEVENT_TX_MASK); + + for_each_set_bit(i, &gfargrp->tx_bit_map, priv->num_tx_queues) { + tx_queue = priv->tx_queue[i]; + /* run Tx cleanup to completion */ + if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) { + gfar_clean_tx_ring(tx_queue); + has_tx_work = 1; + } + } + + if (!has_tx_work) { + u32 imask; + napi_complete(napi); + + spin_lock_irq(&gfargrp->grplock); + imask = gfar_read(&regs->imask); + imask |= IMASK_TX_DEFAULT; + gfar_write(&regs->imask, imask); + spin_unlock_irq(&gfargrp->grplock); + } + + return 0; +} + + #ifdef CONFIG_NET_POLL_CONTROLLER /* Polling 'interrupt' - used by things like netconsole to send skbs * without having to re-enable interrupts. 
It's not called while @@ -3101,12 +3121,11 @@ static void adjust_link(struct net_device *dev) { struct gfar_private *priv = netdev_priv(dev); struct gfar __iomem *regs = priv->gfargrp[0].regs; - unsigned long flags; struct phy_device *phydev = priv->phydev; int new_state = 0; - local_irq_save(flags); - lock_tx_qs(priv); + if (test_bit(GFAR_RESETTING, &priv->state)) + return; if (phydev->link) { u32 tempval1 = gfar_read(&regs->maccfg1); @@ -3178,8 +3197,6 @@ static void adjust_link(struct net_device *dev) if (new_state && netif_msg_link(priv)) phy_print_status(phydev); - unlock_tx_qs(priv); - local_irq_restore(flags); } /* Update the hash table based on the current list of multicast diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h index 52bb2b0195cc..84632c569f2c 100644 --- a/drivers/net/ethernet/freescale/gianfar.h +++ b/drivers/net/ethernet/freescale/gianfar.h @@ -9,7 +9,7 @@ * Maintainer: Kumar Gala * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com> * - * Copyright 2002-2009, 2011 Freescale Semiconductor, Inc. + * Copyright 2002-2009, 2011-2013 Freescale Semiconductor, Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the @@ -377,8 +377,11 @@ extern const char gfar_driver_version[]; IMASK_RXFEN0 | IMASK_BSY | IMASK_EBERR | IMASK_BABR | \ IMASK_XFUN | IMASK_RXC | IMASK_BABT | IMASK_DPE \ | IMASK_PERR) -#define IMASK_RTX_DISABLED ((~(IMASK_RXFEN0 | IMASK_TXFEN | IMASK_BSY)) \ - & IMASK_DEFAULT) +#define IMASK_RX_DEFAULT (IMASK_RXFEN0 | IMASK_BSY) +#define IMASK_TX_DEFAULT (IMASK_TXFEN | IMASK_TXBEN) + +#define IMASK_RX_DISABLED ((~(IMASK_RX_DEFAULT)) & IMASK_DEFAULT) +#define IMASK_TX_DISABLED ((~(IMASK_TX_DEFAULT)) & IMASK_DEFAULT) /* Fifo management */ #define FIFO_TX_THR_MASK 0x01ff @@ -409,7 +412,9 @@ extern const char gfar_driver_version[]; /* This default RIR value directly corresponds * to the 3-bit hash value generated */ -#define DEFAULT_RIR0 0x05397700 +#define DEFAULT_8RXQ_RIR0 0x05397700 +/* Map even hash values to Q0, and odd ones to Q1 */ +#define DEFAULT_2RXQ_RIR0 0x04104100 /* RQFCR register bits */ #define RQFCR_GPI 0x80000000 @@ -880,7 +885,6 @@ struct gfar { #define FSL_GIANFAR_DEV_HAS_CSUM 0x00000010 #define FSL_GIANFAR_DEV_HAS_VLAN 0x00000020 #define FSL_GIANFAR_DEV_HAS_EXTENDED_HASH 0x00000040 -#define FSL_GIANFAR_DEV_HAS_PADDING 0x00000080 #define FSL_GIANFAR_DEV_HAS_MAGIC_PACKET 0x00000100 #define FSL_GIANFAR_DEV_HAS_BD_STASHING 0x00000200 #define FSL_GIANFAR_DEV_HAS_BUF_STASHING 0x00000400 @@ -892,8 +896,8 @@ struct gfar { #define DEFAULT_MAPPING 0xFF #endif -#define ISRG_SHIFT_TX 0x10 -#define ISRG_SHIFT_RX 0x18 +#define ISRG_RR0 0x80000000 +#define ISRG_TR0 0x00800000 /* The same driver can operate in two modes */ /* SQ_SG_MODE: Single Queue Single Group Mode @@ -905,6 +909,22 @@ enum { MQ_MG_MODE }; +/* GFAR_SQ_POLLING: Single Queue NAPI polling mode + * The driver supports a single pair of Rx/Tx queues + * per interrupt group (Rx/Tx int line). MQ_MG mode + * devices have 2 interrupt groups, so the device will + * have a total of 2 Tx and 2 Rx queues in this case. + * GFAR_MQ_POLLING: Multi Queue NAPI polling mode + * The driver supports all 8 Rx and Tx HW queues, + * each queue mapped by the Device Tree to one of + * the 2 interrupt groups. This mode implies significant + * processing overhead (CPU and controller level). 
+ */ +enum gfar_poll_mode { + GFAR_SQ_POLLING = 0, + GFAR_MQ_POLLING +}; + /* * Per TX queue stats */ @@ -966,7 +986,6 @@ struct rx_q_stats { /** * struct gfar_priv_rx_q - per rx queue structure - * @rxlock: per queue rx spin lock * @rx_skbuff: skb pointers * @skb_currx: currently use skb pointer * @rx_bd_base: First rx buffer descriptor @@ -979,8 +998,7 @@ struct rx_q_stats { */ struct gfar_priv_rx_q { - spinlock_t rxlock __attribute__ ((aligned (SMP_CACHE_BYTES))); - struct sk_buff ** rx_skbuff; + struct sk_buff **rx_skbuff __aligned(SMP_CACHE_BYTES); dma_addr_t rx_bd_dma_base; struct rxbd8 *rx_bd_base; struct rxbd8 *cur_rx; @@ -1016,17 +1034,20 @@ struct gfar_irqinfo { */ struct gfar_priv_grp { - spinlock_t grplock __attribute__ ((aligned (SMP_CACHE_BYTES))); - struct napi_struct napi; - struct gfar_private *priv; + spinlock_t grplock __aligned(SMP_CACHE_BYTES); + struct napi_struct napi_rx; + struct napi_struct napi_tx; struct gfar __iomem *regs; - unsigned int rstat; - unsigned long num_rx_queues; - unsigned long rx_bit_map; - /* cacheline 3 */ + struct gfar_priv_tx_q *tx_queue; + struct gfar_priv_rx_q *rx_queue; unsigned int tstat; + unsigned int rstat; + + struct gfar_private *priv; unsigned long num_tx_queues; unsigned long tx_bit_map; + unsigned long num_rx_queues; + unsigned long rx_bit_map; struct gfar_irqinfo *irqinfo[GFAR_NUM_IRQS]; }; @@ -1041,6 +1062,11 @@ enum gfar_errata { GFAR_ERRATA_12 = 0x08, /* a.k.a errata eTSEC49 */ }; +enum gfar_dev_state { + GFAR_DOWN = 1, + GFAR_RESETTING +}; + /* Struct stolen almost completely (and shamelessly) from the FCC enet source * (Ok, that's not so true anymore, but there is a family resemblance) * The GFAR buffer descriptors track the ring buffers. The rx_bd_base @@ -1051,8 +1077,6 @@ enum gfar_errata { * the buffer descriptor determines the actual condition. 
*/ struct gfar_private { - unsigned int num_rx_queues; - struct device *dev; struct net_device *ndev; enum gfar_errata errata; @@ -1060,6 +1084,7 @@ struct gfar_private { u16 uses_rxfcb; u16 padding; + u32 device_flags; /* HW time stamping enabled flag */ int hwts_rx_en; @@ -1069,10 +1094,12 @@ struct gfar_private { struct gfar_priv_rx_q *rx_queue[MAX_RX_QS]; struct gfar_priv_grp gfargrp[MAXGROUPS]; - u32 device_flags; + unsigned long state; - unsigned int mode; + unsigned short mode; + unsigned short poll_mode; unsigned int num_tx_queues; + unsigned int num_rx_queues; unsigned int num_grps; /* Network Statistics */ @@ -1113,6 +1140,9 @@ struct gfar_private { unsigned int total_tx_ring_size; unsigned int total_rx_ring_size; + u32 rqueue; + u32 tqueue; + /* RX per device parameters */ unsigned int rx_stash_size; unsigned int rx_stash_index; @@ -1127,11 +1157,6 @@ struct gfar_private { u32 __iomem *hash_regs[16]; int hash_width; - /* global parameters */ - unsigned int fifo_threshold; - unsigned int fifo_starve; - unsigned int fifo_starve_off; - /*Filer table*/ unsigned int ftp_rqfpr[MAX_FILER_IDX + 1]; unsigned int ftp_rqfcr[MAX_FILER_IDX + 1]; @@ -1176,21 +1201,42 @@ static inline void gfar_read_filer(struct gfar_private *priv, *fpr = gfar_read(&regs->rqfpr); } -void lock_rx_qs(struct gfar_private *priv); -void lock_tx_qs(struct gfar_private *priv); -void unlock_rx_qs(struct gfar_private *priv); -void unlock_tx_qs(struct gfar_private *priv); +static inline void gfar_write_isrg(struct gfar_private *priv) +{ + struct gfar __iomem *regs = priv->gfargrp[0].regs; + u32 __iomem *baddr = &regs->isrg0; + u32 isrg = 0; + int grp_idx, i; + + for (grp_idx = 0; grp_idx < priv->num_grps; grp_idx++) { + struct gfar_priv_grp *grp = &priv->gfargrp[grp_idx]; + + for_each_set_bit(i, &grp->rx_bit_map, priv->num_rx_queues) { + isrg |= (ISRG_RR0 >> i); + } + + for_each_set_bit(i, &grp->tx_bit_map, priv->num_tx_queues) { + isrg |= (ISRG_TR0 >> i); + } + + gfar_write(baddr, isrg); + + baddr++; + isrg = 0; + } +} + irqreturn_t gfar_receive(int irq, void *dev_id); int startup_gfar(struct net_device *dev); void stop_gfar(struct net_device *dev); -void gfar_halt(struct net_device *dev); +void reset_gfar(struct net_device *dev); +void gfar_mac_reset(struct gfar_private *priv); +void gfar_halt(struct gfar_private *priv); +void gfar_start(struct gfar_private *priv); void gfar_phy_test(struct mii_bus *bus, struct phy_device *phydev, int enable, u32 regnum, u32 read); void gfar_configure_coalescing_all(struct gfar_private *priv); -void gfar_init_sysfs(struct net_device *dev); int gfar_set_features(struct net_device *dev, netdev_features_t features); -void gfar_check_rx_parser_mode(struct gfar_private *priv); -void gfar_vlan_mode(struct net_device *dev, netdev_features_t features); extern const struct ethtool_ops gfar_ethtool_ops; diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c index 63d234419cc1..891dbee6e6c1 100644 --- a/drivers/net/ethernet/freescale/gianfar_ethtool.c +++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c @@ -44,10 +44,6 @@ #include "gianfar.h" -extern void gfar_start(struct net_device *dev); -extern int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, - int rx_work_limit); - #define GFAR_MAX_COAL_USECS 0xffff #define GFAR_MAX_COAL_FRAMES 0xff static void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy, @@ -364,25 +360,11 @@ static int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals) { struct 
gfar_private *priv = netdev_priv(dev); - int i = 0; + int i, err = 0; if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_COALESCE)) return -EOPNOTSUPP; - /* Set up rx coalescing */ - /* As of now, we will enable/disable coalescing for all - * queues together in case of eTSEC2, this will be modified - * along with the ethtool interface - */ - if ((cvals->rx_coalesce_usecs == 0) || - (cvals->rx_max_coalesced_frames == 0)) { - for (i = 0; i < priv->num_rx_queues; i++) - priv->rx_queue[i]->rxcoalescing = 0; - } else { - for (i = 0; i < priv->num_rx_queues; i++) - priv->rx_queue[i]->rxcoalescing = 1; - } - if (NULL == priv->phydev) return -ENODEV; @@ -399,6 +381,32 @@ static int gfar_scoalesce(struct net_device *dev, return -EINVAL; } + /* Check the bounds of the values */ + if (cvals->tx_coalesce_usecs > GFAR_MAX_COAL_USECS) { + netdev_info(dev, "Coalescing is limited to %d microseconds\n", + GFAR_MAX_COAL_USECS); + return -EINVAL; + } + + if (cvals->tx_max_coalesced_frames > GFAR_MAX_COAL_FRAMES) { + netdev_info(dev, "Coalescing is limited to %d frames\n", + GFAR_MAX_COAL_FRAMES); + return -EINVAL; + } + + while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state)) + cpu_relax(); + + /* Set up rx coalescing */ + if ((cvals->rx_coalesce_usecs == 0) || + (cvals->rx_max_coalesced_frames == 0)) { + for (i = 0; i < priv->num_rx_queues; i++) + priv->rx_queue[i]->rxcoalescing = 0; + } else { + for (i = 0; i < priv->num_rx_queues; i++) + priv->rx_queue[i]->rxcoalescing = 1; + } + for (i = 0; i < priv->num_rx_queues; i++) { priv->rx_queue[i]->rxic = mk_ic_value( cvals->rx_max_coalesced_frames, @@ -415,28 +423,22 @@ static int gfar_scoalesce(struct net_device *dev, priv->tx_queue[i]->txcoalescing = 1; } - /* Check the bounds of the values */ - if (cvals->tx_coalesce_usecs > GFAR_MAX_COAL_USECS) { - netdev_info(dev, "Coalescing is limited to %d microseconds\n", - GFAR_MAX_COAL_USECS); - return -EINVAL; - } - - if (cvals->tx_max_coalesced_frames > GFAR_MAX_COAL_FRAMES) { - netdev_info(dev, "Coalescing is limited to %d frames\n", - GFAR_MAX_COAL_FRAMES); - return -EINVAL; - } - for (i = 0; i < priv->num_tx_queues; i++) { priv->tx_queue[i]->txic = mk_ic_value( cvals->tx_max_coalesced_frames, gfar_usecs2ticks(priv, cvals->tx_coalesce_usecs)); } - gfar_configure_coalescing_all(priv); + if (dev->flags & IFF_UP) { + stop_gfar(dev); + err = startup_gfar(dev); + } else { + gfar_mac_reset(priv); + } + + clear_bit_unlock(GFAR_RESETTING, &priv->state); - return 0; + return err; } /* Fills in rvals with the current ring parameters. Currently, @@ -467,15 +469,13 @@ static void gfar_gringparam(struct net_device *dev, } /* Change the current ring parameters, stopping the controller if - * necessary so that we don't mess things up while we're in - * motion. We wait for the ring to be clean before reallocating - * the rings. + * necessary so that we don't mess things up while we're in motion. 
*/ static int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rvals) { struct gfar_private *priv = netdev_priv(dev); - int err = 0, i = 0; + int err = 0, i; if (rvals->rx_pending > GFAR_RX_MAX_RING_SIZE) return -EINVAL; @@ -493,44 +493,25 @@ static int gfar_sringparam(struct net_device *dev, return -EINVAL; } + while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state)) + cpu_relax(); - if (dev->flags & IFF_UP) { - unsigned long flags; - - /* Halt TX and RX, and process the frames which - * have already been received - */ - local_irq_save(flags); - lock_tx_qs(priv); - lock_rx_qs(priv); - - gfar_halt(dev); - - unlock_rx_qs(priv); - unlock_tx_qs(priv); - local_irq_restore(flags); - - for (i = 0; i < priv->num_rx_queues; i++) - gfar_clean_rx_ring(priv->rx_queue[i], - priv->rx_queue[i]->rx_ring_size); - - /* Now we take down the rings to rebuild them */ + if (dev->flags & IFF_UP) stop_gfar(dev); - } - /* Change the size */ - for (i = 0; i < priv->num_rx_queues; i++) { + /* Change the sizes */ + for (i = 0; i < priv->num_rx_queues; i++) priv->rx_queue[i]->rx_ring_size = rvals->rx_pending; + + for (i = 0; i < priv->num_tx_queues; i++) priv->tx_queue[i]->tx_ring_size = rvals->tx_pending; - priv->tx_queue[i]->num_txbdfree = - priv->tx_queue[i]->tx_ring_size; - } /* Rebuild the rings with the new size */ - if (dev->flags & IFF_UP) { + if (dev->flags & IFF_UP) err = startup_gfar(dev); - netif_tx_wake_all_queues(dev); - } + + clear_bit_unlock(GFAR_RESETTING, &priv->state); + return err; } @@ -608,43 +589,29 @@ static int gfar_spauseparam(struct net_device *dev, int gfar_set_features(struct net_device *dev, netdev_features_t features) { - struct gfar_private *priv = netdev_priv(dev); - unsigned long flags; - int err = 0, i = 0; netdev_features_t changed = dev->features ^ features; + struct gfar_private *priv = netdev_priv(dev); + int err = 0; - if (changed & (NETIF_F_HW_VLAN_CTAG_TX|NETIF_F_HW_VLAN_CTAG_RX)) - gfar_vlan_mode(dev, features); - - if (!(changed & NETIF_F_RXCSUM)) + if (!(changed & (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_RXCSUM))) return 0; - if (dev->flags & IFF_UP) { - /* Halt TX and RX, and process the frames which - * have already been received - */ - local_irq_save(flags); - lock_tx_qs(priv); - lock_rx_qs(priv); - - gfar_halt(dev); + while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state)) + cpu_relax(); - unlock_tx_qs(priv); - unlock_rx_qs(priv); - local_irq_restore(flags); - - for (i = 0; i < priv->num_rx_queues; i++) - gfar_clean_rx_ring(priv->rx_queue[i], - priv->rx_queue[i]->rx_ring_size); + dev->features = features; + if (dev->flags & IFF_UP) { /* Now we take down the rings to rebuild them */ stop_gfar(dev); - - dev->features = features; - err = startup_gfar(dev); - netif_tx_wake_all_queues(dev); + } else { + gfar_mac_reset(priv); } + + clear_bit_unlock(GFAR_RESETTING, &priv->state); + return err; } @@ -1610,9 +1577,6 @@ static int gfar_write_filer_table(struct gfar_private *priv, if (tab->index > MAX_FILER_IDX - 1) return -EBUSY; - /* Avoid inconsistent filer table to be processed */ - lock_rx_qs(priv); - /* Fill regular entries */ for (; i < MAX_FILER_IDX - 1 && (tab->fe[i].ctrl | tab->fe[i].ctrl); i++) @@ -1625,8 +1589,6 @@ static int gfar_write_filer_table(struct gfar_private *priv, */ gfar_write_filer(priv, i, 0x20, 0x0); - unlock_rx_qs(priv); - return 0; } @@ -1831,6 +1793,9 @@ static int gfar_set_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd) struct gfar_private *priv = netdev_priv(dev); int ret = 0; + if 
(test_bit(GFAR_RESETTING, &priv->state)) + return -EBUSY; + mutex_lock(&priv->rx_queue_access); switch (cmd->cmd) { diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c index abc28da27042..bb568006f37d 100644 --- a/drivers/net/ethernet/freescale/gianfar_ptp.c +++ b/drivers/net/ethernet/freescale/gianfar_ptp.c @@ -414,6 +414,7 @@ static struct ptp_clock_info ptp_gianfar_caps = { .n_alarm = 0, .n_ext_ts = N_EXT_TS, .n_per_out = 0, + .n_pins = 0, .pps = 1, .adjfreq = ptp_gianfar_adjfreq, .adjtime = ptp_gianfar_adjtime, diff --git a/drivers/net/ethernet/freescale/gianfar_sysfs.c b/drivers/net/ethernet/freescale/gianfar_sysfs.c deleted file mode 100644 index e02dd1378751..000000000000 --- a/drivers/net/ethernet/freescale/gianfar_sysfs.c +++ /dev/null @@ -1,340 +0,0 @@ -/* - * drivers/net/ethernet/freescale/gianfar_sysfs.c - * - * Gianfar Ethernet Driver - * This driver is designed for the non-CPM ethernet controllers - * on the 85xx and 83xx family of integrated processors - * Based on 8260_io/fcc_enet.c - * - * Author: Andy Fleming - * Maintainer: Kumar Gala (galak@kernel.crashing.org) - * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com> - * - * Copyright 2002-2009 Freescale Semiconductor, Inc. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - * - * Sysfs file creation and management - */ - -#include <linux/kernel.h> -#include <linux/string.h> -#include <linux/errno.h> -#include <linux/unistd.h> -#include <linux/delay.h> -#include <linux/etherdevice.h> -#include <linux/spinlock.h> -#include <linux/mm.h> -#include <linux/device.h> - -#include <asm/uaccess.h> -#include <linux/module.h> - -#include "gianfar.h" - -static ssize_t gfar_show_bd_stash(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct gfar_private *priv = netdev_priv(to_net_dev(dev)); - - return sprintf(buf, "%s\n", priv->bd_stash_en ? 
"on" : "off"); -} - -static ssize_t gfar_set_bd_stash(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) -{ - struct gfar_private *priv = netdev_priv(to_net_dev(dev)); - struct gfar __iomem *regs = priv->gfargrp[0].regs; - int new_setting = 0; - u32 temp; - unsigned long flags; - - if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_BD_STASHING)) - return count; - - - /* Find out the new setting */ - if (!strncmp("on", buf, count - 1) || !strncmp("1", buf, count - 1)) - new_setting = 1; - else if (!strncmp("off", buf, count - 1) || - !strncmp("0", buf, count - 1)) - new_setting = 0; - else - return count; - - - local_irq_save(flags); - lock_rx_qs(priv); - - /* Set the new stashing value */ - priv->bd_stash_en = new_setting; - - temp = gfar_read(®s->attr); - - if (new_setting) - temp |= ATTR_BDSTASH; - else - temp &= ~(ATTR_BDSTASH); - - gfar_write(®s->attr, temp); - - unlock_rx_qs(priv); - local_irq_restore(flags); - - return count; -} - -static DEVICE_ATTR(bd_stash, 0644, gfar_show_bd_stash, gfar_set_bd_stash); - -static ssize_t gfar_show_rx_stash_size(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct gfar_private *priv = netdev_priv(to_net_dev(dev)); - - return sprintf(buf, "%d\n", priv->rx_stash_size); -} - -static ssize_t gfar_set_rx_stash_size(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) -{ - struct gfar_private *priv = netdev_priv(to_net_dev(dev)); - struct gfar __iomem *regs = priv->gfargrp[0].regs; - unsigned int length = simple_strtoul(buf, NULL, 0); - u32 temp; - unsigned long flags; - - if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_BUF_STASHING)) - return count; - - local_irq_save(flags); - lock_rx_qs(priv); - - if (length > priv->rx_buffer_size) - goto out; - - if (length == priv->rx_stash_size) - goto out; - - priv->rx_stash_size = length; - - temp = gfar_read(®s->attreli); - temp &= ~ATTRELI_EL_MASK; - temp |= ATTRELI_EL(length); - gfar_write(®s->attreli, temp); - - /* Turn stashing on/off as appropriate */ - temp = gfar_read(®s->attr); - - if (length) - temp |= ATTR_BUFSTASH; - else - temp &= ~(ATTR_BUFSTASH); - - gfar_write(®s->attr, temp); - -out: - unlock_rx_qs(priv); - local_irq_restore(flags); - - return count; -} - -static DEVICE_ATTR(rx_stash_size, 0644, gfar_show_rx_stash_size, - gfar_set_rx_stash_size); - -/* Stashing will only be enabled when rx_stash_size != 0 */ -static ssize_t gfar_show_rx_stash_index(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct gfar_private *priv = netdev_priv(to_net_dev(dev)); - - return sprintf(buf, "%d\n", priv->rx_stash_index); -} - -static ssize_t gfar_set_rx_stash_index(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) -{ - struct gfar_private *priv = netdev_priv(to_net_dev(dev)); - struct gfar __iomem *regs = priv->gfargrp[0].regs; - unsigned short index = simple_strtoul(buf, NULL, 0); - u32 temp; - unsigned long flags; - - if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_BUF_STASHING)) - return count; - - local_irq_save(flags); - lock_rx_qs(priv); - - if (index > priv->rx_stash_size) - goto out; - - if (index == priv->rx_stash_index) - goto out; - - priv->rx_stash_index = index; - - temp = gfar_read(®s->attreli); - temp &= ~ATTRELI_EI_MASK; - temp |= ATTRELI_EI(index); - gfar_write(®s->attreli, temp); - -out: - unlock_rx_qs(priv); - local_irq_restore(flags); - - return count; -} - -static DEVICE_ATTR(rx_stash_index, 0644, gfar_show_rx_stash_index, - gfar_set_rx_stash_index); 
- -static ssize_t gfar_show_fifo_threshold(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct gfar_private *priv = netdev_priv(to_net_dev(dev)); - - return sprintf(buf, "%d\n", priv->fifo_threshold); -} - -static ssize_t gfar_set_fifo_threshold(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) -{ - struct gfar_private *priv = netdev_priv(to_net_dev(dev)); - struct gfar __iomem *regs = priv->gfargrp[0].regs; - unsigned int length = simple_strtoul(buf, NULL, 0); - u32 temp; - unsigned long flags; - - if (length > GFAR_MAX_FIFO_THRESHOLD) - return count; - - local_irq_save(flags); - lock_tx_qs(priv); - - priv->fifo_threshold = length; - - temp = gfar_read(&regs->fifo_tx_thr); - temp &= ~FIFO_TX_THR_MASK; - temp |= length; - gfar_write(&regs->fifo_tx_thr, temp); - - unlock_tx_qs(priv); - local_irq_restore(flags); - - return count; -} - -static DEVICE_ATTR(fifo_threshold, 0644, gfar_show_fifo_threshold, - gfar_set_fifo_threshold); - -static ssize_t gfar_show_fifo_starve(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct gfar_private *priv = netdev_priv(to_net_dev(dev)); - - return sprintf(buf, "%d\n", priv->fifo_starve); -} - -static ssize_t gfar_set_fifo_starve(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) -{ - struct gfar_private *priv = netdev_priv(to_net_dev(dev)); - struct gfar __iomem *regs = priv->gfargrp[0].regs; - unsigned int num = simple_strtoul(buf, NULL, 0); - u32 temp; - unsigned long flags; - - if (num > GFAR_MAX_FIFO_STARVE) - return count; - - local_irq_save(flags); - lock_tx_qs(priv); - - priv->fifo_starve = num; - - temp = gfar_read(&regs->fifo_tx_starve); - temp &= ~FIFO_TX_STARVE_MASK; - temp |= num; - gfar_write(&regs->fifo_tx_starve, temp); - - unlock_tx_qs(priv); - local_irq_restore(flags); - - return count; -} - -static DEVICE_ATTR(fifo_starve, 0644, gfar_show_fifo_starve, - gfar_set_fifo_starve); - -static ssize_t gfar_show_fifo_starve_off(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct gfar_private *priv = netdev_priv(to_net_dev(dev)); - - return sprintf(buf, "%d\n", priv->fifo_starve_off); -} - -static ssize_t gfar_set_fifo_starve_off(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) -{ - struct gfar_private *priv = netdev_priv(to_net_dev(dev)); - struct gfar __iomem *regs = priv->gfargrp[0].regs; - unsigned int num = simple_strtoul(buf, NULL, 0); - u32 temp; - unsigned long flags; - - if (num > GFAR_MAX_FIFO_STARVE_OFF) - return count; - - local_irq_save(flags); - lock_tx_qs(priv); - - priv->fifo_starve_off = num; - - temp = gfar_read(&regs->fifo_tx_starve_shutoff); - temp &= ~FIFO_TX_STARVE_OFF_MASK; - temp |= num; - gfar_write(&regs->fifo_tx_starve_shutoff, temp); - - unlock_tx_qs(priv); - local_irq_restore(flags); - - return count; -} - -static DEVICE_ATTR(fifo_starve_off, 0644, gfar_show_fifo_starve_off, - gfar_set_fifo_starve_off); - -void gfar_init_sysfs(struct net_device *dev) -{ - struct gfar_private *priv = netdev_priv(dev); - int rc; - - /* Initialize the default values */ - priv->fifo_threshold = DEFAULT_FIFO_TX_THR; - priv->fifo_starve = DEFAULT_FIFO_TX_STARVE; - priv->fifo_starve_off = DEFAULT_FIFO_TX_STARVE_OFF; - - /* Create our sysfs files */ - rc = device_create_file(&dev->dev, &dev_attr_bd_stash); - rc |= device_create_file(&dev->dev, &dev_attr_rx_stash_size); - rc |= device_create_file(&dev->dev, &dev_attr_rx_stash_index); - rc |= device_create_file(&dev->dev, 
&dev_attr_fifo_threshold); - rc |= device_create_file(&dev->dev, &dev_attr_fifo_starve); - rc |= device_create_file(&dev->dev, &dev_attr_fifo_starve_off); - if (rc) - dev_err(&dev->dev, "Error creating gianfar sysfs files\n"); -} diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c index 72291a8904a9..c8299c31b21f 100644 --- a/drivers/net/ethernet/freescale/ucc_geth.c +++ b/drivers/net/ethernet/freescale/ucc_geth.c @@ -3261,7 +3261,7 @@ static int ucc_geth_tx(struct net_device *dev, u8 txQ) dev->stats.tx_packets++; - dev_kfree_skb(skb); + dev_consume_skb_any(skb); ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]] = NULL; ugeth->skb_dirtytx[txQ] = diff --git a/drivers/net/ethernet/i825xx/lib82596.c b/drivers/net/ethernet/i825xx/lib82596.c index 17fca323c143..c984998b34a0 100644 --- a/drivers/net/ethernet/i825xx/lib82596.c +++ b/drivers/net/ethernet/i825xx/lib82596.c @@ -993,7 +993,7 @@ static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev) dev->name)); dev->stats.tx_dropped++; - dev_kfree_skb(skb); + dev_kfree_skb_any(skb); } else { if (++lp->next_tx_cmd == TX_RING_SIZE) lp->next_tx_cmd = 0; diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c index 7628e0fd8455..538903bf13bc 100644 --- a/drivers/net/ethernet/ibm/ehea/ehea_main.c +++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c @@ -490,7 +490,7 @@ static int ehea_refill_rq_def(struct ehea_port_res *pr, skb_arr[index] = skb; tmp_addr = ehea_map_vaddr(skb->data); if (tmp_addr == -1) { - dev_kfree_skb(skb); + dev_consume_skb_any(skb); q_skba->os_skbs = fill_wqes - i; ret = 0; break; @@ -856,7 +856,7 @@ static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota) index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id); skb = pr->sq_skba.arr[index]; - dev_kfree_skb(skb); + dev_consume_skb_any(skb); pr->sq_skba.arr[index] = NULL; } @@ -2044,7 +2044,7 @@ static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev, skb_copy_bits(skb, 0, imm_data, skb->len); swqe->immediate_data_length = skb->len; - dev_kfree_skb(skb); + dev_consume_skb_any(skb); } static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev) diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c index 4be971590461..c9127562bd22 100644 --- a/drivers/net/ethernet/ibm/ibmveth.c +++ b/drivers/net/ethernet/ibm/ibmveth.c @@ -522,10 +522,21 @@ retry: return rc; } +static u64 ibmveth_encode_mac_addr(u8 *mac) +{ + int i; + u64 encoded = 0; + + for (i = 0; i < ETH_ALEN; i++) + encoded = (encoded << 8) | mac[i]; + + return encoded; +} + static int ibmveth_open(struct net_device *netdev) { struct ibmveth_adapter *adapter = netdev_priv(netdev); - u64 mac_address = 0; + u64 mac_address; int rxq_entries = 1; unsigned long lpar_rc; int rc; @@ -579,8 +590,7 @@ static int ibmveth_open(struct net_device *netdev) adapter->rx_queue.num_slots = rxq_entries; adapter->rx_queue.toggle = 1; - memcpy(&mac_address, netdev->dev_addr, netdev->addr_len); - mac_address = mac_address >> 16; + mac_address = ibmveth_encode_mac_addr(netdev->dev_addr); rxq_desc.fields.flags_len = IBMVETH_BUF_VALID | adapter->rx_queue.queue_len; @@ -1034,7 +1044,7 @@ retry_bounce: DMA_TO_DEVICE); out: - dev_kfree_skb(skb); + dev_consume_skb_any(skb); return NETDEV_TX_OK; map_failed_frags: @@ -1062,7 +1072,7 @@ static int ibmveth_poll(struct napi_struct *napi, int budget) unsigned long lpar_rc; restart_poll: - do { + while (frames_processed < budget) { if 
(!ibmveth_rxq_pending_buffer(adapter)) break; @@ -1111,7 +1121,7 @@ restart_poll: netdev->stats.rx_bytes += length; frames_processed++; } - } while (frames_processed < budget); + } ibmveth_replenish_task(adapter); @@ -1183,8 +1193,8 @@ static void ibmveth_set_multicast_list(struct net_device *netdev) /* add the addresses to the filter table */ netdev_for_each_mc_addr(ha, netdev) { /* add the multicast address to the filter table */ - unsigned long mcast_addr = 0; - memcpy(((char *)&mcast_addr)+2, ha->addr, ETH_ALEN); + u64 mcast_addr; + mcast_addr = ibmveth_encode_mac_addr(ha->addr); lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address, IbmVethMcastAddFilter, mcast_addr); @@ -1372,9 +1382,6 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id) netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16); - adapter->mac_addr = 0; - memcpy(&adapter->mac_addr, mac_addr_p, ETH_ALEN); - netdev->irq = dev->irq; netdev->netdev_ops = &ibmveth_netdev_ops; netdev->ethtool_ops = &netdev_ethtool_ops; @@ -1383,7 +1390,7 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id) NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; netdev->features |= netdev->hw_features; - memcpy(netdev->dev_addr, &adapter->mac_addr, netdev->addr_len); + memcpy(netdev->dev_addr, mac_addr_p, ETH_ALEN); for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) { struct kobject *kobj = &adapter->rx_buff_pool[i].kobj; diff --git a/drivers/net/ethernet/ibm/ibmveth.h b/drivers/net/ethernet/ibm/ibmveth.h index 451ba7949e15..1f37499d4398 100644 --- a/drivers/net/ethernet/ibm/ibmveth.h +++ b/drivers/net/ethernet/ibm/ibmveth.h @@ -138,7 +138,6 @@ struct ibmveth_adapter { struct napi_struct napi; struct net_device_stats stats; unsigned int mcastFilterSize; - unsigned long mac_addr; void * buffer_list_addr; void * filter_list_addr; dma_addr_t buffer_list_dma; diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c index bf7a01ef9a57..b56461ce674c 100644 --- a/drivers/net/ethernet/intel/e100.c +++ b/drivers/net/ethernet/intel/e100.c @@ -1778,9 +1778,9 @@ static int e100_xmit_prepare(struct nic *nic, struct cb *cb, * testing, ie sending frames with bad CRC. */ if (unlikely(skb->no_fcs)) - cb->command |= __constant_cpu_to_le16(cb_tx_nc); + cb->command |= cpu_to_le16(cb_tx_nc); else - cb->command &= ~__constant_cpu_to_le16(cb_tx_nc); + cb->command &= ~cpu_to_le16(cb_tx_nc); /* interrupt every 16 packets regardless of delay */ if ((nic->cbs_avail & ~15) == nic->cbs_avail) diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c index ff2d806eaef7..a5f6b11d6992 100644 --- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c +++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c @@ -1,30 +1,23 @@ -/******************************************************************************* - - Intel PRO/1000 Linux driver - Copyright(c) 1999 - 2013 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. 
- - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS <linux.nics@intel.com> - e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +/* Intel PRO/1000 Linux driver + * Copyright(c) 1999 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * Linux NICS <linux.nics@intel.com> + * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + */ /* 80003ES2LAN Gigabit Ethernet Controller (Copper) * 80003ES2LAN Gigabit Ethernet Controller (Serdes) diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.h b/drivers/net/ethernet/intel/e1000e/80003es2lan.h index 90d363b2d280..535a9430976d 100644 --- a/drivers/net/ethernet/intel/e1000e/80003es2lan.h +++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.h @@ -1,30 +1,23 @@ -/******************************************************************************* - - Intel PRO/1000 Linux driver - Copyright(c) 1999 - 2013 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS <linux.nics@intel.com> - e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +/* Intel PRO/1000 Linux driver + * Copyright(c) 1999 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * Linux NICS <linux.nics@intel.com> + * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + */ #ifndef _E1000E_80003ES2LAN_H_ #define _E1000E_80003ES2LAN_H_ diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c index 8fed74e3fa53..e0aa7f1efb08 100644 --- a/drivers/net/ethernet/intel/e1000e/82571.c +++ b/drivers/net/ethernet/intel/e1000e/82571.c @@ -1,30 +1,23 @@ -/******************************************************************************* - - Intel PRO/1000 Linux driver - Copyright(c) 1999 - 2013 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS <linux.nics@intel.com> - e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +/* Intel PRO/1000 Linux driver + * Copyright(c) 1999 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * Linux NICS <linux.nics@intel.com> + * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + */ /* 82571EB Gigabit Ethernet Controller * 82571EB Gigabit Ethernet Controller (Copper) diff --git a/drivers/net/ethernet/intel/e1000e/82571.h b/drivers/net/ethernet/intel/e1000e/82571.h index 08e24dc3dc0e..2e758f796d60 100644 --- a/drivers/net/ethernet/intel/e1000e/82571.h +++ b/drivers/net/ethernet/intel/e1000e/82571.h @@ -1,30 +1,23 @@ -/******************************************************************************* - - Intel PRO/1000 Linux driver - Copyright(c) 1999 - 2013 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. 
- - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS <linux.nics@intel.com> - e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +/* Intel PRO/1000 Linux driver + * Copyright(c) 1999 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * Linux NICS <linux.nics@intel.com> + * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + */ #ifndef _E1000E_82571_H_ #define _E1000E_82571_H_ diff --git a/drivers/net/ethernet/intel/e1000e/Makefile b/drivers/net/ethernet/intel/e1000e/Makefile index c2dcfcc10857..106de493373c 100644 --- a/drivers/net/ethernet/intel/e1000e/Makefile +++ b/drivers/net/ethernet/intel/e1000e/Makefile @@ -1,7 +1,7 @@ ################################################################################ # # Intel PRO/1000 Linux driver -# Copyright(c) 1999 - 2013 Intel Corporation. +# Copyright(c) 1999 - 2014 Intel Corporation. # # This program is free software; you can redistribute it and/or modify it # under the terms and conditions of the GNU General Public License, @@ -12,9 +12,8 @@ # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for # more details. # -# You should have received a copy of the GNU General Public License along with -# this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. +# You should have received a copy of the GNU General Public License +# along with this program; if not, see <http://www.gnu.org/licenses/>. # # The full GNU General Public License is included in this distribution in # the file called "COPYING". diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h index 351c94a0cf74..d18e89212575 100644 --- a/drivers/net/ethernet/intel/e1000e/defines.h +++ b/drivers/net/ethernet/intel/e1000e/defines.h @@ -1,30 +1,23 @@ -/******************************************************************************* - - Intel PRO/1000 Linux driver - Copyright(c) 1999 - 2013 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. 
- - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS <linux.nics@intel.com> - e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +/* Intel PRO/1000 Linux driver + * Copyright(c) 1999 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * Linux NICS <linux.nics@intel.com> + * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + */ #ifndef _E1000_DEFINES_H_ #define _E1000_DEFINES_H_ @@ -35,9 +28,11 @@ /* Definitions for power management and wakeup registers */ /* Wake Up Control */ -#define E1000_WUC_APME 0x00000001 /* APM Enable */ -#define E1000_WUC_PME_EN 0x00000002 /* PME Enable */ -#define E1000_WUC_PHY_WAKE 0x00000100 /* if PHY supports wakeup */ +#define E1000_WUC_APME 0x00000001 /* APM Enable */ +#define E1000_WUC_PME_EN 0x00000002 /* PME Enable */ +#define E1000_WUC_PME_STATUS 0x00000004 /* PME Status */ +#define E1000_WUC_APMPME 0x00000008 /* Assert PME on APM Wakeup */ +#define E1000_WUC_PHY_WAKE 0x00000100 /* if PHY supports wakeup */ /* Wake Up Filter Control */ #define E1000_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */ diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h index 0150f7fc893d..1471c5464a89 100644 --- a/drivers/net/ethernet/intel/e1000e/e1000.h +++ b/drivers/net/ethernet/intel/e1000e/e1000.h @@ -1,30 +1,23 @@ -/******************************************************************************* - - Intel PRO/1000 Linux driver - Copyright(c) 1999 - 2013 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 
- - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS <linux.nics@intel.com> - e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +/* Intel PRO/1000 Linux driver + * Copyright(c) 1999 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * Linux NICS <linux.nics@intel.com> + * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + */ /* Linux PRO/1000 Ethernet Driver main header file */ @@ -269,6 +262,7 @@ struct e1000_adapter { u32 tx_head_addr; u32 tx_fifo_size; u32 tx_dma_failed; + u32 tx_hwtstamp_timeouts; /* Rx */ bool (*clean_rx) (struct e1000_ring *ring, int *work_done, @@ -333,7 +327,6 @@ struct e1000_adapter { struct work_struct update_phy_task; struct work_struct print_hang_task; - bool idle_check; int phy_hang_count; u16 tx_ring_count; @@ -342,6 +335,7 @@ struct e1000_adapter { struct hwtstamp_config hwtstamp_config; struct delayed_work systim_overflow_work; struct sk_buff *tx_hwtstamp_skb; + unsigned long tx_hwtstamp_start; struct work_struct tx_hwtstamp_work; spinlock_t systim_lock; /* protects SYSTIML/H regsters */ struct cyclecounter cc; @@ -476,7 +470,7 @@ void e1000e_check_options(struct e1000_adapter *adapter); void e1000e_set_ethtool_ops(struct net_device *netdev); int e1000e_up(struct e1000_adapter *adapter); -void e1000e_down(struct e1000_adapter *adapter); +void e1000e_down(struct e1000_adapter *adapter, bool reset); void e1000e_reinit_locked(struct e1000_adapter *adapter); void e1000e_reset(struct e1000_adapter *adapter); void e1000e_power_up_phy(struct e1000_adapter *adapter); diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c index d14c8f53384c..cad250bc1b99 100644 --- a/drivers/net/ethernet/intel/e1000e/ethtool.c +++ b/drivers/net/ethernet/intel/e1000e/ethtool.c @@ -1,30 +1,23 @@ -/******************************************************************************* - - Intel PRO/1000 Linux driver - Copyright(c) 1999 - 2013 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 
- - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS <linux.nics@intel.com> - e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +/* Intel PRO/1000 Linux driver + * Copyright(c) 1999 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * Linux NICS <linux.nics@intel.com> + * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + */ /* ethtool support for e1000 */ @@ -111,6 +104,7 @@ static const struct e1000_stats e1000_gstrings_stats[] = { E1000_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared), E1000_STAT("uncorr_ecc_errors", uncorr_errors), E1000_STAT("corr_ecc_errors", corr_errors), + E1000_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts), }; #define E1000_GLOBAL_STATS_LEN ARRAY_SIZE(e1000_gstrings_stats) @@ -332,7 +326,7 @@ static int e1000_set_settings(struct net_device *netdev, /* reset the link */ if (netif_running(adapter->netdev)) { - e1000e_down(adapter); + e1000e_down(adapter, true); e1000e_up(adapter); } else { e1000e_reset(adapter); @@ -380,7 +374,7 @@ static int e1000_set_pauseparam(struct net_device *netdev, if (adapter->fc_autoneg == AUTONEG_ENABLE) { hw->fc.requested_mode = e1000_fc_default; if (netif_running(adapter->netdev)) { - e1000e_down(adapter); + e1000e_down(adapter, true); e1000e_up(adapter); } else { e1000e_reset(adapter); @@ -726,7 +720,7 @@ static int e1000_set_ringparam(struct net_device *netdev, pm_runtime_get_sync(netdev->dev.parent); - e1000e_down(adapter); + e1000e_down(adapter, true); /* We can't just free everything and then setup again, because the * ISRs in MSI-X mode get passed pointers to the Tx and Rx ring @@ -924,15 +918,21 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data) } if (mac->type == e1000_pch2lan) { /* SHRAH[0,1,2] different than previous */ - if (i == 7) + if (i == 1) mask &= 0xFFF4FFFF; /* SHRAH[3] different than SHRAH[0,1,2] */ - if (i == 10) + if (i == 4) mask |= (1 << 30); + /* RAR[1-6] owned by management engine - skipping */ + if (i > 0) + i += 6; } REG_PATTERN_TEST_ARRAY(E1000_RA, ((i << 1) + 1), mask, 0xFFFFFFFF); + /* reset index to actual value */ + if ((mac->type == e1000_pch2lan) && (i > 6)) + i -= 6; } for (i = 0; i < mac->mta_reg_count; i++) diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h index b7f38435d1fd..6b3de5f39a97 100644 --- a/drivers/net/ethernet/intel/e1000e/hw.h +++ b/drivers/net/ethernet/intel/e1000e/hw.h @@ -1,30 +1,23 @@ -/******************************************************************************* - - Intel PRO/1000 Linux driver - Copyright(c) 1999 - 2013 Intel Corporation. 
- - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS <linux.nics@intel.com> - e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +/* Intel PRO/1000 Linux driver + * Copyright(c) 1999 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * Linux NICS <linux.nics@intel.com> + * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + */ #ifndef _E1000_HW_H_ #define _E1000_HW_H_ @@ -655,12 +648,20 @@ struct e1000_shadow_ram { #define E1000_ICH8_SHADOW_RAM_WORDS 2048 +/* I218 PHY Ultra Low Power (ULP) states */ +enum e1000_ulp_state { + e1000_ulp_state_unknown, + e1000_ulp_state_off, + e1000_ulp_state_on, +}; + struct e1000_dev_spec_ich8lan { bool kmrn_lock_loss_workaround_enabled; struct e1000_shadow_ram shadow_ram[E1000_ICH8_SHADOW_RAM_WORDS]; bool nvm_k1_enabled; bool eee_disable; u16 eee_lp_ability; + enum e1000_ulp_state ulp_state; }; struct e1000_hw { diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c index 42f0f6717511..9866f264f55e 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c @@ -1,30 +1,23 @@ -/******************************************************************************* - - Intel PRO/1000 Linux driver - Copyright(c) 1999 - 2013 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 
- - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS <linux.nics@intel.com> - e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +/* Intel PRO/1000 Linux driver + * Copyright(c) 1999 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * Linux NICS <linux.nics@intel.com> + * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + */ /* 82562G 10/100 Network Connection * 82562G-2 10/100 Network Connection @@ -53,6 +46,14 @@ * 82578DC Gigabit Network Connection * 82579LM Gigabit Network Connection * 82579V Gigabit Network Connection + * Ethernet Connection I217-LM + * Ethernet Connection I217-V + * Ethernet Connection I218-V + * Ethernet Connection I218-LM + * Ethernet Connection (2) I218-LM + * Ethernet Connection (2) I218-V + * Ethernet Connection (3) I218-LM + * Ethernet Connection (3) I218-V */ #include "e1000.h" @@ -142,7 +143,9 @@ static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index); static void e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index); static s32 e1000_k1_workaround_lv(struct e1000_hw *hw); static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate); +static s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force); static s32 e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw); +static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state); static inline u16 __er16flash(struct e1000_hw *hw, unsigned long reg) { @@ -239,6 +242,47 @@ out: } /** + * e1000_toggle_lanphypc_pch_lpt - toggle the LANPHYPC pin value + * @hw: pointer to the HW structure + * + * Toggling the LANPHYPC pin value fully power-cycles the PHY and is + * used to reset the PHY to a quiescent state when necessary. 
+ **/ +static void e1000_toggle_lanphypc_pch_lpt(struct e1000_hw *hw) +{ + u32 mac_reg; + + /* Set Phy Config Counter to 50msec */ + mac_reg = er32(FEXTNVM3); + mac_reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK; + mac_reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC; + ew32(FEXTNVM3, mac_reg); + + /* Toggle LANPHYPC Value bit */ + mac_reg = er32(CTRL); + mac_reg |= E1000_CTRL_LANPHYPC_OVERRIDE; + mac_reg &= ~E1000_CTRL_LANPHYPC_VALUE; + ew32(CTRL, mac_reg); + e1e_flush(); + usleep_range(10, 20); + mac_reg &= ~E1000_CTRL_LANPHYPC_OVERRIDE; + ew32(CTRL, mac_reg); + e1e_flush(); + + if (hw->mac.type < e1000_pch_lpt) { + msleep(50); + } else { + u16 count = 20; + + do { + usleep_range(5000, 10000); + } while (!(er32(CTRL_EXT) & E1000_CTRL_EXT_LPCD) && count--); + + msleep(30); + } +} + +/** * e1000_init_phy_workarounds_pchlan - PHY initialization workarounds * @hw: pointer to the HW structure * @@ -247,6 +291,7 @@ out: **/ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw) { + struct e1000_adapter *adapter = hw->adapter; u32 mac_reg, fwsm = er32(FWSM); s32 ret_val; @@ -255,6 +300,12 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw) */ e1000_gate_hw_phy_config_ich8lan(hw, true); + /* It is not possible to be certain of the current state of ULP + * so forcibly disable it. + */ + hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_unknown; + e1000_disable_ulp_lpt_lp(hw, true); + ret_val = hw->phy.ops.acquire(hw); if (ret_val) { e_dbg("Failed to initialize PHY flow\n"); @@ -300,33 +351,9 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw) break; } - e_dbg("Toggling LANPHYPC\n"); - - /* Set Phy Config Counter to 50msec */ - mac_reg = er32(FEXTNVM3); - mac_reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK; - mac_reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC; - ew32(FEXTNVM3, mac_reg); - /* Toggle LANPHYPC Value bit */ - mac_reg = er32(CTRL); - mac_reg |= E1000_CTRL_LANPHYPC_OVERRIDE; - mac_reg &= ~E1000_CTRL_LANPHYPC_VALUE; - ew32(CTRL, mac_reg); - e1e_flush(); - usleep_range(10, 20); - mac_reg &= ~E1000_CTRL_LANPHYPC_OVERRIDE; - ew32(CTRL, mac_reg); - e1e_flush(); - if (hw->mac.type < e1000_pch_lpt) { - msleep(50); - } else { - u16 count = 20; - do { - usleep_range(5000, 10000); - } while (!(er32(CTRL_EXT) & - E1000_CTRL_EXT_LPCD) && count--); - usleep_range(30000, 60000); + e1000_toggle_lanphypc_pch_lpt(hw); + if (hw->mac.type >= e1000_pch_lpt) { if (e1000_phy_is_accessible_pchlan(hw)) break; @@ -349,12 +376,31 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw) hw->phy.ops.release(hw); if (!ret_val) { + + /* Check to see if able to reset PHY. Print error if not */ + if (hw->phy.ops.check_reset_block(hw)) { + e_err("Reset blocked by ME\n"); + goto out; + } + /* Reset the PHY before any access to it. Doing so, ensures * that the PHY is in a known good state before we read/write * PHY registers. The generic reset is sufficient here, * because we haven't determined the PHY type yet. */ ret_val = e1000e_phy_hw_reset_generic(hw); + if (ret_val) + goto out; + + /* On a successful reset, possibly need to wait for the PHY + * to quiesce to an accessible state before returning control + * to the calling function. If the PHY does not quiesce, then + * return E1000E_BLK_PHY_RESET, as this is the condition that + * the PHY is in. 
+ */ + ret_val = hw->phy.ops.check_reset_block(hw); + if (ret_val) + e_err("ME blocked access to PHY after reset\n"); } out: @@ -724,8 +770,14 @@ s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data) * Enable/disable EEE based on setting in dev_spec structure, the duplex of * the link and the EEE capabilities of the link partner. The LPI Control * register bits will remain set only if/when link is up. + * + * EEE LPI must not be asserted earlier than one second after link is up. + * On 82579, EEE LPI should not be enabled until such time otherwise there + * can be link issues with some switches. Other devices can have EEE LPI + * enabled immediately upon link up since they have a timer in hardware which + * prevents LPI from being asserted too early. **/ -static s32 e1000_set_eee_pchlan(struct e1000_hw *hw) +s32 e1000_set_eee_pchlan(struct e1000_hw *hw) { struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; s32 ret_val; @@ -979,6 +1031,253 @@ static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link) } /** + * e1000_enable_ulp_lpt_lp - configure Ultra Low Power mode for LynxPoint-LP + * @hw: pointer to the HW structure + * @to_sx: boolean indicating a system power state transition to Sx + * + * When link is down, configure ULP mode to significantly reduce the power + * to the PHY. If on a Manageability Engine (ME) enabled system, tell the + * ME firmware to start the ULP configuration. If not on an ME enabled + * system, configure the ULP mode by software. + */ +s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx) +{ + u32 mac_reg; + s32 ret_val = 0; + u16 phy_reg; + + if ((hw->mac.type < e1000_pch_lpt) || + (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPT_I217_LM) || + (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPT_I217_V) || + (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_LM2) || + (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_V2) || + (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_on)) + return 0; + + if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID) { + /* Request ME configure ULP mode in the PHY */ + mac_reg = er32(H2ME); + mac_reg |= E1000_H2ME_ULP | E1000_H2ME_ENFORCE_SETTINGS; + ew32(H2ME, mac_reg); + + goto out; + } + + if (!to_sx) { + int i = 0; + + /* Poll up to 5 seconds for Cable Disconnected indication */ + while (!(er32(FEXT) & E1000_FEXT_PHY_CABLE_DISCONNECTED)) { + /* Bail if link is re-acquired */ + if (er32(STATUS) & E1000_STATUS_LU) + return -E1000_ERR_PHY; + + if (i++ == 100) + break; + + msleep(50); + } + e_dbg("CABLE_DISCONNECTED %s set after %dmsec\n", + (er32(FEXT) & + E1000_FEXT_PHY_CABLE_DISCONNECTED) ? 
"" : "not", i * 50); + } + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + goto out; + + /* Force SMBus mode in PHY */ + ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg); + if (ret_val) + goto release; + phy_reg |= CV_SMB_CTRL_FORCE_SMBUS; + e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg); + + /* Force SMBus mode in MAC */ + mac_reg = er32(CTRL_EXT); + mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS; + ew32(CTRL_EXT, mac_reg); + + /* Set Inband ULP Exit, Reset to SMBus mode and + * Disable SMBus Release on PERST# in PHY + */ + ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg); + if (ret_val) + goto release; + phy_reg |= (I218_ULP_CONFIG1_RESET_TO_SMBUS | + I218_ULP_CONFIG1_DISABLE_SMB_PERST); + if (to_sx) { + if (er32(WUFC) & E1000_WUFC_LNKC) + phy_reg |= I218_ULP_CONFIG1_WOL_HOST; + + phy_reg |= I218_ULP_CONFIG1_STICKY_ULP; + } else { + phy_reg |= I218_ULP_CONFIG1_INBAND_EXIT; + } + e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg); + + /* Set Disable SMBus Release on PERST# in MAC */ + mac_reg = er32(FEXTNVM7); + mac_reg |= E1000_FEXTNVM7_DISABLE_SMB_PERST; + ew32(FEXTNVM7, mac_reg); + + /* Commit ULP changes in PHY by starting auto ULP configuration */ + phy_reg |= I218_ULP_CONFIG1_START; + e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg); +release: + hw->phy.ops.release(hw); +out: + if (ret_val) + e_dbg("Error in ULP enable flow: %d\n", ret_val); + else + hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_on; + + return ret_val; +} + +/** + * e1000_disable_ulp_lpt_lp - unconfigure Ultra Low Power mode for LynxPoint-LP + * @hw: pointer to the HW structure + * @force: boolean indicating whether or not to force disabling ULP + * + * Un-configure ULP mode when link is up, the system is transitioned from + * Sx or the driver is unloaded. If on a Manageability Engine (ME) enabled + * system, poll for an indication from ME that ULP has been un-configured. + * If not on an ME enabled system, un-configure the ULP mode by software. + * + * During nominal operation, this function is called when link is acquired + * to disable ULP mode (force=false); otherwise, for example when unloading + * the driver or during Sx->S0 transitions, this is called with force=true + * to forcibly disable ULP. 
+ */ +static s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force) +{ + s32 ret_val = 0; + u32 mac_reg; + u16 phy_reg; + int i = 0; + + if ((hw->mac.type < e1000_pch_lpt) || + (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPT_I217_LM) || + (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPT_I217_V) || + (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_LM2) || + (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_V2) || + (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_off)) + return 0; + + if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID) { + if (force) { + /* Request ME un-configure ULP mode in the PHY */ + mac_reg = er32(H2ME); + mac_reg &= ~E1000_H2ME_ULP; + mac_reg |= E1000_H2ME_ENFORCE_SETTINGS; + ew32(H2ME, mac_reg); + } + + /* Poll up to 100msec for ME to clear ULP_CFG_DONE */ + while (er32(FWSM) & E1000_FWSM_ULP_CFG_DONE) { + if (i++ == 10) { + ret_val = -E1000_ERR_PHY; + goto out; + } + + usleep_range(10000, 20000); + } + e_dbg("ULP_CONFIG_DONE cleared after %dmsec\n", i * 10); + + if (force) { + mac_reg = er32(H2ME); + mac_reg &= ~E1000_H2ME_ENFORCE_SETTINGS; + ew32(H2ME, mac_reg); + } else { + /* Clear H2ME.ULP after ME ULP configuration */ + mac_reg = er32(H2ME); + mac_reg &= ~E1000_H2ME_ULP; + ew32(H2ME, mac_reg); + } + + goto out; + } + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + goto out; + + if (force) + /* Toggle LANPHYPC Value bit */ + e1000_toggle_lanphypc_pch_lpt(hw); + + /* Unforce SMBus mode in PHY */ + ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg); + if (ret_val) { + /* The MAC might be in PCIe mode, so temporarily force to + * SMBus mode in order to access the PHY. + */ + mac_reg = er32(CTRL_EXT); + mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS; + ew32(CTRL_EXT, mac_reg); + + msleep(50); + + ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, + &phy_reg); + if (ret_val) + goto release; + } + phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS; + e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg); + + /* Unforce SMBus mode in MAC */ + mac_reg = er32(CTRL_EXT); + mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS; + ew32(CTRL_EXT, mac_reg); + + /* When ULP mode was previously entered, K1 was disabled by the + * hardware. Re-Enable K1 in the PHY when exiting ULP. 
+ */ + ret_val = e1000_read_phy_reg_hv_locked(hw, HV_PM_CTRL, &phy_reg); + if (ret_val) + goto release; + phy_reg |= HV_PM_CTRL_K1_ENABLE; + e1000_write_phy_reg_hv_locked(hw, HV_PM_CTRL, phy_reg); + + /* Clear ULP enabled configuration */ + ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg); + if (ret_val) + goto release; + phy_reg &= ~(I218_ULP_CONFIG1_IND | + I218_ULP_CONFIG1_STICKY_ULP | + I218_ULP_CONFIG1_RESET_TO_SMBUS | + I218_ULP_CONFIG1_WOL_HOST | + I218_ULP_CONFIG1_INBAND_EXIT | + I218_ULP_CONFIG1_DISABLE_SMB_PERST); + e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg); + + /* Commit ULP changes by starting auto ULP configuration */ + phy_reg |= I218_ULP_CONFIG1_START; + e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg); + + /* Clear Disable SMBus Release on PERST# in MAC */ + mac_reg = er32(FEXTNVM7); + mac_reg &= ~E1000_FEXTNVM7_DISABLE_SMB_PERST; + ew32(FEXTNVM7, mac_reg); + +release: + hw->phy.ops.release(hw); + if (force) { + e1000_phy_hw_reset(hw); + msleep(50); + } +out: + if (ret_val) + e_dbg("Error in ULP disable flow: %d\n", ret_val); + else + hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_off; + + return ret_val; +} + +/** * e1000_check_for_copper_link_ich8lan - Check for link (Copper) * @hw: pointer to the HW structure * @@ -1106,9 +1405,11 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) e1000e_check_downshift(hw); /* Enable/Disable EEE after link up */ - ret_val = e1000_set_eee_pchlan(hw); - if (ret_val) - return ret_val; + if (hw->phy.type > e1000_phy_82579) { + ret_val = e1000_set_eee_pchlan(hw); + if (ret_val) + return ret_val; + } /* If we are forcing speed/duplex, then we simply return since * we have already determined whether we have link or not. @@ -1374,7 +1675,7 @@ static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index) /* RAR[1-6] are owned by manageability. Skip those and program the * next address into the SHRA register array. */ - if (index < (u32)(hw->mac.rar_entry_count - 6)) { + if (index < (u32)(hw->mac.rar_entry_count)) { s32 ret_val; ret_val = e1000_acquire_swflag_ich8lan(hw); @@ -1484,11 +1785,13 @@ out: **/ static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw) { - u32 fwsm; + bool blocked = false; + int i = 0; - fwsm = er32(FWSM); - - return (fwsm & E1000_ICH_FWSM_RSPCIPHY) ? 0 : E1000_BLK_PHY_RESET; + while ((blocked = !(er32(FWSM) & E1000_ICH_FWSM_RSPCIPHY)) && + (i++ < 10)) + usleep_range(10000, 20000); + return blocked ? E1000_BLK_PHY_RESET : 0; } /** diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h index 217090df33e7..bead50f9187b 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.h +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h @@ -1,30 +1,23 @@ -/******************************************************************************* - - Intel PRO/1000 Linux driver - Copyright(c) 1999 - 2013 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. 
- - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS <linux.nics@intel.com> - e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +/* Intel PRO/1000 Linux driver + * Copyright(c) 1999 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * Linux NICS <linux.nics@intel.com> + * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + */ #ifndef _E1000E_ICH8LAN_H_ #define _E1000E_ICH8LAN_H_ @@ -65,11 +58,16 @@ #define E1000_FWSM_WLOCK_MAC_MASK 0x0380 #define E1000_FWSM_WLOCK_MAC_SHIFT 7 +#define E1000_FWSM_ULP_CFG_DONE 0x00000400 /* Low power cfg done */ /* Shared Receive Address Registers */ #define E1000_SHRAL_PCH_LPT(_i) (0x05408 + ((_i) * 8)) #define E1000_SHRAH_PCH_LPT(_i) (0x0540C + ((_i) * 8)) +#define E1000_H2ME 0x05B50 /* Host to ME */ +#define E1000_H2ME_ULP 0x00000800 /* ULP Indication Bit */ +#define E1000_H2ME_ENFORCE_SETTINGS 0x00001000 /* Enforce Settings */ + #define ID_LED_DEFAULT_ICH8LAN ((ID_LED_DEF1_DEF2 << 12) | \ (ID_LED_OFF1_OFF2 << 8) | \ (ID_LED_OFF1_ON2 << 4) | \ @@ -82,6 +80,9 @@ #define E1000_ICH8_LAN_INIT_TIMEOUT 1500 +/* FEXT register bit definition */ +#define E1000_FEXT_PHY_CABLE_DISCONNECTED 0x00000004 + #define E1000_FEXTNVM_SW_CONFIG 1 #define E1000_FEXTNVM_SW_CONFIG_ICH8M (1 << 27) /* different on ICH8M */ @@ -95,10 +96,12 @@ #define E1000_FEXTNVM6_REQ_PLL_CLK 0x00000100 #define E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION 0x00000200 +#define E1000_FEXTNVM7_DISABLE_SMB_PERST 0x00000020 + #define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL #define E1000_ICH_RAR_ENTRIES 7 -#define E1000_PCH2_RAR_ENTRIES 11 /* RAR[0-6], SHRA[0-3] */ +#define E1000_PCH2_RAR_ENTRIES 5 /* RAR[0], SHRA[0-3] */ #define E1000_PCH_LPT_RAR_ENTRIES 12 /* RAR[0], SHRA[0-10] */ #define PHY_PAGE_SHIFT 5 @@ -161,6 +164,16 @@ #define CV_SMB_CTRL PHY_REG(769, 23) #define CV_SMB_CTRL_FORCE_SMBUS 0x0001 +/* I218 Ultra Low Power Configuration 1 Register */ +#define I218_ULP_CONFIG1 PHY_REG(779, 16) +#define I218_ULP_CONFIG1_START 0x0001 /* Start auto ULP config */ +#define I218_ULP_CONFIG1_IND 0x0004 /* Pwr up from ULP indication */ +#define I218_ULP_CONFIG1_STICKY_ULP 0x0010 /* Set sticky ULP mode */ +#define I218_ULP_CONFIG1_INBAND_EXIT 0x0020 /* Inband on ULP exit */ +#define I218_ULP_CONFIG1_WOL_HOST 0x0040 /* WoL Host on ULP exit */ +#define I218_ULP_CONFIG1_RESET_TO_SMBUS 0x0100 /* Reset to SMBus mode */ +#define I218_ULP_CONFIG1_DISABLE_SMB_PERST 0x1000 /* Disable on PERST# */ + /* SMBus 
Address Phy Register */ #define HV_SMB_ADDR PHY_REG(768, 26) #define HV_SMB_ADDR_MASK 0x007F @@ -195,6 +208,7 @@ /* PHY Power Management Control */ #define HV_PM_CTRL PHY_REG(770, 17) #define HV_PM_CTRL_PLL_STOP_IN_K1_GIGA 0x100 +#define HV_PM_CTRL_K1_ENABLE 0x4000 #define SW_FLAG_TIMEOUT 1000 /* SW Semaphore flag timeout in ms */ @@ -268,4 +282,6 @@ void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw); s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable); s32 e1000_read_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 *data); s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data); +s32 e1000_set_eee_pchlan(struct e1000_hw *hw); +s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx); #endif /* _E1000E_ICH8LAN_H_ */ diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c index 2480c1091873..baa0a466d1d0 100644 --- a/drivers/net/ethernet/intel/e1000e/mac.c +++ b/drivers/net/ethernet/intel/e1000e/mac.c @@ -1,30 +1,23 @@ -/******************************************************************************* - - Intel PRO/1000 Linux driver - Copyright(c) 1999 - 2013 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS <linux.nics@intel.com> - e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +/* Intel PRO/1000 Linux driver + * Copyright(c) 1999 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * Linux NICS <linux.nics@intel.com> + * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + */ #include "e1000.h" diff --git a/drivers/net/ethernet/intel/e1000e/mac.h b/drivers/net/ethernet/intel/e1000e/mac.h index a61fee404ebe..4e81c2825b7a 100644 --- a/drivers/net/ethernet/intel/e1000e/mac.h +++ b/drivers/net/ethernet/intel/e1000e/mac.h @@ -1,30 +1,23 @@ -/******************************************************************************* - - Intel PRO/1000 Linux driver - Copyright(c) 1999 - 2013 Intel Corporation. 
- - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS <linux.nics@intel.com> - e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +/* Intel PRO/1000 Linux driver + * Copyright(c) 1999 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * Linux NICS <linux.nics@intel.com> + * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + */ #ifndef _E1000E_MAC_H_ #define _E1000E_MAC_H_ diff --git a/drivers/net/ethernet/intel/e1000e/manage.c b/drivers/net/ethernet/intel/e1000e/manage.c index e4b0f1ef92f6..cb37ff1f1321 100644 --- a/drivers/net/ethernet/intel/e1000e/manage.c +++ b/drivers/net/ethernet/intel/e1000e/manage.c @@ -1,30 +1,23 @@ -/******************************************************************************* - - Intel PRO/1000 Linux driver - Copyright(c) 1999 - 2013 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS <linux.nics@intel.com> - e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +/* Intel PRO/1000 Linux driver + * Copyright(c) 1999 - 2014 Intel Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * Linux NICS <linux.nics@intel.com> + * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + */ #include "e1000.h" diff --git a/drivers/net/ethernet/intel/e1000e/manage.h b/drivers/net/ethernet/intel/e1000e/manage.h index 326897c29ea8..a8c27f98f7b0 100644 --- a/drivers/net/ethernet/intel/e1000e/manage.h +++ b/drivers/net/ethernet/intel/e1000e/manage.h @@ -1,30 +1,23 @@ -/******************************************************************************* - - Intel PRO/1000 Linux driver - Copyright(c) 1999 - 2013 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS <linux.nics@intel.com> - e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +/* Intel PRO/1000 Linux driver + * Copyright(c) 1999 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * Linux NICS <linux.nics@intel.com> + * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + */ #ifndef _E1000E_MANAGE_H_ #define _E1000E_MANAGE_H_ diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 6d91933c4cdd..dce377b59b2c 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -1,30 +1,23 @@ -/******************************************************************************* - - Intel PRO/1000 Linux driver - Copyright(c) 1999 - 2013 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS <linux.nics@intel.com> - e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +/* Intel PRO/1000 Linux driver + * Copyright(c) 1999 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * Linux NICS <linux.nics@intel.com> + * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt @@ -885,7 +878,7 @@ static inline void e1000_rx_hash(struct net_device *netdev, __le32 rss, struct sk_buff *skb) { if (netdev->features & NETIF_F_RXHASH) - skb->rxhash = le32_to_cpu(rss); + skb_set_hash(skb, le32_to_cpu(rss), PKT_HASH_TYPE_L3); } /** @@ -1097,8 +1090,14 @@ static void e1000_print_hw_hang(struct work_struct *work) adapter->tx_hang_recheck = true; return; } - /* Real hang detected */ adapter->tx_hang_recheck = false; + + if (er32(TDH(0)) == er32(TDT(0))) { + e_dbg("false hang detected, ignoring\n"); + return; + } + + /* Real hang detected */ netif_stop_queue(netdev); e1e_rphy(hw, MII_BMSR, &phy_status); @@ -1128,6 +1127,8 @@ static void e1000_print_hw_hang(struct work_struct *work) eop, jiffies, eop_desc->upper.fields.status, er32(STATUS), phy_status, phy_1000t_status, phy_ext_status, pci_status); + e1000e_dump(adapter); + /* Suggest workaround for known h/w issue */ if ((hw->mac.type == e1000_pchlan) && (er32(CTRL) & E1000_CTRL_TFCE)) e_err("Try turning off Tx pause (flow control) via ethtool\n"); @@ -1147,9 +1148,6 @@ static void e1000e_tx_hwtstamp_work(struct work_struct *work) tx_hwtstamp_work); struct e1000_hw *hw = &adapter->hw; - if (!adapter->tx_hwtstamp_skb) - return; - if (er32(TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID) { struct skb_shared_hwtstamps shhwtstamps; u64 txstmp; @@ -1162,6 +1160,12 @@ static void e1000e_tx_hwtstamp_work(struct work_struct *work) skb_tstamp_tx(adapter->tx_hwtstamp_skb, &shhwtstamps); dev_kfree_skb_any(adapter->tx_hwtstamp_skb); adapter->tx_hwtstamp_skb = NULL; + } else if (time_after(jiffies, adapter->tx_hwtstamp_start + + adapter->tx_timeout_factor * HZ)) { + dev_kfree_skb_any(adapter->tx_hwtstamp_skb); + adapter->tx_hwtstamp_skb = NULL; + adapter->tx_hwtstamp_timeouts++; + e_warn("clearing Tx timestamp hang\n"); } else { /* reschedule to check later */ schedule_work(&adapter->tx_hwtstamp_work); @@ -1701,7 +1705,7 @@ static void e1000_clean_rx_ring(struct e1000_ring *rx_ring) adapter->flags2 &= ~FLAG2_IS_DISCARDING; writel(0, rx_ring->head); - if (rx_ring->adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) + if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) e1000e_update_rdt_wa(rx_ring, 0); else writel(0, rx_ring->tail); @@ -2038,13 +2042,16 @@ void e1000e_set_interrupt_capability(struct e1000_adapter *adapter) msix_entry), GFP_KERNEL); if (adapter->msix_entries) { + struct e1000_adapter *a = adapter; + for (i = 0; i < adapter->num_vectors; i++) adapter->msix_entries[i].entry = i; - err = pci_enable_msix(adapter->pdev, - adapter->msix_entries, - adapter->num_vectors); - if (err == 0) + err = pci_enable_msix_range(a->pdev, + a->msix_entries, + a->num_vectors, + a->num_vectors); + if (err > 0) return; } /* MSI-X failed, so fall through and try MSI */ @@ -2402,7 +2409,7 @@ static void e1000_clean_tx_ring(struct e1000_ring *tx_ring) tx_ring->next_to_clean = 0; writel(0, tx_ring->head); - if (tx_ring->adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) + if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) e1000e_update_tdt_wa(tx_ring, 0); else writel(0, tx_ring->tail); @@ -2894,7 +2901,7 @@ static void e1000_configure_tx(struct e1000_adapter *adapter) struct e1000_hw *hw = &adapter->hw; struct e1000_ring *tx_ring = adapter->tx_ring; u64 tdba; - u32 tdlen, tarc; + u32 tdlen, tctl, tarc; /* Setup the HW Tx Head and Tail descriptor pointers */ tdba = tx_ring->dma; @@ -2931,6 +2938,12 @@ static void e1000_configure_tx(struct e1000_adapter *adapter) /* erratum work around: set
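The timestamp-timeout branch added above is the standard wrap-safe jiffies idiom: record a start stamp when the timestamp request is queued (the tx_hwtstamp_start assignment appears in the e1000_xmit_frame() hunk below), then bound the wait with time_after(), which compares jiffies values correctly across counter wrap-around where a plain '>' would not. A minimal sketch with illustrative names rather than the driver's fields:

#include <linux/jiffies.h>

/* True once the pending operation has outlived its deadline. */
static bool hwtstamp_timed_out(unsigned long start, unsigned int factor)
{
	return time_after(jiffies, start + factor * HZ);
}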
txdctl the same for both queues */ ew32(TXDCTL(1), er32(TXDCTL(0))); + /* Program the Transmit Control Register */ + tctl = er32(TCTL); + tctl &= ~E1000_TCTL_CT; + tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC | + (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT); + if (adapter->flags & FLAG_TARC_SPEED_MODE_BIT) { tarc = er32(TARC(0)); /* set the speed mode bit, we'll clear it if we're not at @@ -2961,6 +2974,8 @@ static void e1000_configure_tx(struct e1000_adapter *adapter) /* enable Report Status bit */ adapter->txd_cmd |= E1000_TXD_CMD_RS; + ew32(TCTL, tctl); + hw->mac.ops.config_collision_dist(hw); } @@ -2976,11 +2991,21 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter) u32 rctl, rfctl; u32 pages = 0; - /* Workaround Si errata on PCHx - configure jumbo frame flow */ - if ((hw->mac.type >= e1000_pch2lan) && - (adapter->netdev->mtu > ETH_DATA_LEN) && - e1000_lv_jumbo_workaround_ich8lan(hw, true)) - e_dbg("failed to enable jumbo frame workaround mode\n"); + /* Workaround Si errata on PCHx - configure jumbo frame flow. + * If jumbo frames not set, program related MAC/PHY registers + * to h/w defaults + */ + if (hw->mac.type >= e1000_pch2lan) { + s32 ret_val; + + if (adapter->netdev->mtu > ETH_DATA_LEN) + ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, true); + else + ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, false); + + if (ret_val) + e_dbg("failed to enable|disable jumbo frame workaround mode\n"); + } /* Program MC offset vector base */ rctl = er32(RCTL); @@ -3331,6 +3356,9 @@ static void e1000e_set_rx_mode(struct net_device *netdev) struct e1000_hw *hw = &adapter->hw; u32 rctl; + if (pm_runtime_suspended(netdev->dev.parent)) + return; + /* Check for Promiscuous and All Multicast modes */ rctl = er32(RCTL); @@ -3691,10 +3719,6 @@ void e1000e_power_up_phy(struct e1000_adapter *adapter) */ static void e1000_power_down_phy(struct e1000_adapter *adapter) { - /* WoL is enabled */ - if (adapter->wol) - return; - if (adapter->hw.phy.ops.power_down) adapter->hw.phy.ops.power_down(&adapter->hw); } @@ -3911,10 +3935,8 @@ void e1000e_reset(struct e1000_adapter *adapter) } if (!netif_running(adapter->netdev) && - !test_bit(__E1000_TESTING, &adapter->state)) { + !test_bit(__E1000_TESTING, &adapter->state)) e1000_power_down_phy(adapter); - return; - } e1000_get_phy_info(hw); @@ -3981,7 +4003,12 @@ static void e1000e_flush_descriptors(struct e1000_adapter *adapter) static void e1000e_update_stats(struct e1000_adapter *adapter); -void e1000e_down(struct e1000_adapter *adapter) +/** + * e1000e_down - quiesce the device and optionally reset the hardware + * @adapter: board private structure + * @reset: boolean flag to reset the hardware or not + */ +void e1000e_down(struct e1000_adapter *adapter, bool reset) { struct net_device *netdev = adapter->netdev; struct e1000_hw *hw = &adapter->hw; @@ -4035,12 +4062,8 @@ void e1000e_down(struct e1000_adapter *adapter) e1000_lv_jumbo_workaround_ich8lan(hw, false)) e_dbg("failed to disable jumbo frame workaround mode\n"); - if (!pci_channel_offline(adapter->pdev)) + if (reset && !pci_channel_offline(adapter->pdev)) e1000e_reset(adapter); - - /* TODO: for power management, we could drop the link and - * pci_disable_device here. 
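The MSI-X hunk in the previous chunk is part of the tree-wide move from pci_enable_msix() to pci_enable_msix_range(), whose return convention differs: it returns the number of vectors actually allocated (at least minvec) on success, or a negative errno, so the old 'err == 0' success test becomes 'err > 0'. A minimal sketch of an exact-count request under that convention (helper name is illustrative):

#include <linux/pci.h>

static int request_msix_exact(struct pci_dev *pdev,
			      struct msix_entry *entries, int nvec)
{
	int i, ret;

	for (i = 0; i < nvec; i++)
		entries[i].entry = i;

	/* minvec == maxvec: allocate exactly nvec vectors or fail */
	ret = pci_enable_msix_range(pdev, entries, nvec, nvec);
	if (ret < 0)
		return ret;	/* negative errno from the PCI core */

	return 0;		/* ret == nvec on success */
}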
- */ } void e1000e_reinit_locked(struct e1000_adapter *adapter) @@ -4048,7 +4071,7 @@ void e1000e_reinit_locked(struct e1000_adapter *adapter) might_sleep(); while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) usleep_range(1000, 2000); - e1000e_down(adapter); + e1000e_down(adapter, true); e1000e_up(adapter); clear_bit(__E1000_RESETTING, &adapter->state); } @@ -4326,7 +4349,6 @@ static int e1000_open(struct net_device *netdev) adapter->tx_hang_recheck = false; netif_start_queue(netdev); - adapter->idle_check = true; hw->mac.get_link_status = true; pm_runtime_put(&pdev->dev); @@ -4376,14 +4398,15 @@ static int e1000_close(struct net_device *netdev) pm_runtime_get_sync(&pdev->dev); if (!test_bit(__E1000_DOWN, &adapter->state)) { - e1000e_down(adapter); + e1000e_down(adapter, true); e1000_free_irq(adapter); + + /* Link status message must follow this format */ + pr_info("%s NIC Link is Down\n", adapter->netdev->name); } napi_disable(&adapter->napi); - e1000_power_down_phy(adapter); - e1000e_free_tx_resources(adapter->tx_ring); e1000e_free_rx_resources(adapter->rx_ring); @@ -4460,11 +4483,16 @@ static void e1000e_update_phy_task(struct work_struct *work) struct e1000_adapter *adapter = container_of(work, struct e1000_adapter, update_phy_task); + struct e1000_hw *hw = &adapter->hw; if (test_bit(__E1000_DOWN, &adapter->state)) return; - e1000_get_phy_info(&adapter->hw); + e1000_get_phy_info(hw); + + /* Enable EEE on 82579 after link up */ + if (hw->phy.type == e1000_phy_82579) + e1000_set_eee_pchlan(hw); } /** @@ -4799,6 +4827,7 @@ static void e1000e_check_82574_phy_workaround(struct e1000_adapter *adapter) if (adapter->phy_hang_count > 1) { adapter->phy_hang_count = 0; + e_dbg("PHY appears hung - resetting\n"); schedule_work(&adapter->reset_task); } } @@ -4957,15 +4986,11 @@ static void e1000_watchdog_task(struct work_struct *work) mod_timer(&adapter->phy_info_timer, round_jiffies(jiffies + 2 * HZ)); - /* The link is lost so the controller stops DMA. - * If there is queued Tx work that cannot be done - * or if on an 8000ES2LAN which requires a Rx packet - * buffer work-around on link down event, reset the - * controller to flush the Tx/Rx packet buffers. - * (Do the reset outside of interrupt context). + /* 8000ES2LAN requires a Rx packet buffer work-around + * on link down event; reset the controller to flush + * the Rx packet buffer. */ - if ((adapter->flags & FLAG_RX_NEEDS_RESTART) || - (e1000_desc_unused(tx_ring) + 1 < tx_ring->count)) + if (adapter->flags & FLAG_RX_NEEDS_RESTART) adapter->flags |= FLAG_RESTART_NOW; else pm_schedule_suspend(netdev->dev.parent, @@ -4988,6 +5013,15 @@ link_up: adapter->gotc_old = adapter->stats.gotc; spin_unlock(&adapter->stats64_lock); + /* If the link is lost the controller stops DMA, but + * if there is queued Tx work it cannot be done. So + * reset the controller to flush the Tx packet buffers. + */ + if (!netif_carrier_ok(netdev) && + (e1000_desc_unused(tx_ring) + 1 < tx_ring->count)) + adapter->flags |= FLAG_RESTART_NOW; + + /* If reset is necessary, do it outside of interrupt context. 
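The relocated queued-Tx test above, e1000_desc_unused(tx_ring) + 1 < tx_ring->count, detects descriptors still in flight: the ring convention keeps one slot permanently unused so full and empty states stay distinguishable. For reference, a sketch of the accounting helper matching that convention:

static int e1000_desc_unused(struct e1000_ring *ring)
{
	/* One descriptor always stays unused, hence the trailing -1;
	 * the two branches handle next_to_use wrapping past
	 * next_to_clean.
	 */
	if (ring->next_to_clean > ring->next_to_use)
		return ring->next_to_clean - ring->next_to_use - 1;

	return ring->count + ring->next_to_clean - ring->next_to_use - 1;
}

With that definition, 'unused + 1 < count' is true exactly when at least one descriptor remains queued.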
*/ if (adapter->flags & FLAG_RESTART_NOW) { schedule_work(&adapter->reset_task); /* return immediately since reset is imminent */ @@ -5546,6 +5580,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; tx_flags |= E1000_TX_FLAGS_HWTSTAMP; adapter->tx_hwtstamp_skb = skb_get(skb); + adapter->tx_hwtstamp_start = jiffies; schedule_work(&adapter->tx_hwtstamp_work); } else { skb_tx_timestamp(skb); @@ -5684,8 +5719,11 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu) adapter->max_frame_size = max_frame; e_info("changing MTU from %d to %d\n", netdev->mtu, new_mtu); netdev->mtu = new_mtu; + + pm_runtime_get_sync(netdev->dev.parent); + if (netif_running(netdev)) - e1000e_down(adapter); + e1000e_down(adapter, true); /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN * means we reserve 2 more, this pushes us to allocate from the next @@ -5711,6 +5749,8 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu) else e1000e_reset(adapter); + pm_runtime_put_sync(netdev->dev.parent); + clear_bit(__E1000_RESETTING, &adapter->state); return 0; @@ -5852,7 +5892,7 @@ static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc) { struct e1000_hw *hw = &adapter->hw; - u32 i, mac_reg; + u32 i, mac_reg, wuc; u16 phy_reg, wuc_enable; int retval; @@ -5899,13 +5939,18 @@ static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc) phy_reg |= BM_RCTL_RFCE; hw->phy.ops.write_reg_page(&adapter->hw, BM_RCTL, phy_reg); + wuc = E1000_WUC_PME_EN; + if (wufc & (E1000_WUFC_MAG | E1000_WUFC_LNKC)) + wuc |= E1000_WUC_APME; + /* enable PHY wakeup in MAC register */ ew32(WUFC, wufc); - ew32(WUC, E1000_WUC_PHY_WAKE | E1000_WUC_PME_EN); + ew32(WUC, (E1000_WUC_PHY_WAKE | E1000_WUC_APMPME | + E1000_WUC_PME_STATUS | wuc)); /* configure and enable PHY wakeup in PHY registers */ hw->phy.ops.write_reg_page(&adapter->hw, BM_WUFC, wufc); - hw->phy.ops.write_reg_page(&adapter->hw, BM_WUC, E1000_WUC_PME_EN); + hw->phy.ops.write_reg_page(&adapter->hw, BM_WUC, wuc); /* activate PHY wakeup */ wuc_enable |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT; @@ -5918,15 +5963,10 @@ release: return retval; } -static int __e1000_shutdown(struct pci_dev *pdev, bool runtime) +static int e1000e_pm_freeze(struct device *dev) { - struct net_device *netdev = pci_get_drvdata(pdev); + struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev)); struct e1000_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; - u32 ctrl, ctrl_ext, rctl, status; - /* Runtime suspend should only enable wakeup for link changes */ - u32 wufc = runtime ? 
E1000_WUFC_LNKC : adapter->wol; - int retval = 0; netif_device_detach(netdev); @@ -5937,11 +5977,29 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime) usleep_range(10000, 20000); WARN_ON(test_bit(__E1000_RESETTING, &adapter->state)); - e1000e_down(adapter); + + /* Quiesce the device without resetting the hardware */ + e1000e_down(adapter, false); e1000_free_irq(adapter); } e1000e_reset_interrupt_capability(adapter); + /* Allow time for pending master requests to run */ + e1000e_disable_pcie_master(&adapter->hw); + + return 0; +} + +static int __e1000_shutdown(struct pci_dev *pdev, bool runtime) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 ctrl, ctrl_ext, rctl, status; + /* Runtime suspend should only enable wakeup for link changes */ + u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol; + int retval = 0; + status = er32(STATUS); if (status & E1000_STATUS_LU) wufc &= ~E1000_WUFC_LNKC; @@ -5972,12 +6030,12 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime) ew32(CTRL_EXT, ctrl_ext); } + if (!runtime) + e1000e_power_up_phy(adapter); + if (adapter->flags & FLAG_IS_ICH) e1000_suspend_workarounds_ich8lan(&adapter->hw); - /* Allow time for pending master requests to run */ - e1000e_disable_pcie_master(&adapter->hw); - if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) { /* enable wakeup by the PHY */ retval = e1000_init_phy_wakeup(adapter, wufc); @@ -5991,10 +6049,23 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime) } else { ew32(WUC, 0); ew32(WUFC, 0); + + e1000_power_down_phy(adapter); } - if (adapter->hw.phy.type == e1000_phy_igp_3) + if (adapter->hw.phy.type == e1000_phy_igp_3) { e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw); + } else if (hw->mac.type == e1000_pch_lpt) { + if (!(wufc & (E1000_WUFC_EX | E1000_WUFC_MC | E1000_WUFC_BC))) + /* ULP does not support wake from unicast, multicast + * or broadcast. + */ + retval = e1000_enable_ulp_lpt_lp(hw, !runtime); + + if (retval) + return retval; + } + /* Release control of h/w to f/w. If f/w is AMT enabled, this * would have already happened in close and is redundant. @@ -6102,18 +6173,12 @@ static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state) } #ifdef CONFIG_PM -static bool e1000e_pm_ready(struct e1000_adapter *adapter) -{ - return !!adapter->tx_ring->buffer_info; -} - static int __e1000_resume(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; u16 aspm_disable_flag = 0; - u32 err; if (adapter->flags2 & FLAG2_DISABLE_ASPM_L0S) aspm_disable_flag = PCIE_LINK_STATE_L0S; @@ -6124,13 +6189,6 @@ static int __e1000_resume(struct pci_dev *pdev) pci_set_master(pdev); - e1000e_set_interrupt_capability(adapter); - if (netif_running(netdev)) { - err = e1000_request_irq(adapter); - if (err) - return err; - } - if (hw->mac.type >= e1000_pch2lan) e1000_resume_workarounds_pchlan(&adapter->hw); @@ -6169,11 +6227,6 @@ static int __e1000_resume(struct pci_dev *pdev) e1000_init_manageability_pt(adapter); - if (netif_running(netdev)) - e1000e_up(adapter); - - netif_device_attach(netdev); - /* If the controller has AMT, do not set DRV_LOAD until the interface * is up. For all other cases, let the f/w know that the h/w is now * under the control of the driver. 
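The wake-up filter selection above reduces to three rules: runtime suspend arms only link-change wake, system suspend honours the user's full WoL mask, and the link-change bit is dropped when the link is already up so a steady link cannot immediately wake the part. A minimal sketch of that selection (helper name is illustrative):

static u32 pick_wufc(bool runtime, u32 wol_mask, bool link_up)
{
	/* Runtime suspend should only enable wakeup for link changes */
	u32 wufc = runtime ? E1000_WUFC_LNKC : wol_mask;

	if (link_up)
		wufc &= ~E1000_WUFC_LNKC;

	return wufc;
}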
@@ -6184,75 +6237,111 @@ static int __e1000_resume(struct pci_dev *pdev) return 0; } +static int e1000e_pm_thaw(struct device *dev) +{ + struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev)); + struct e1000_adapter *adapter = netdev_priv(netdev); + + e1000e_set_interrupt_capability(adapter); + if (netif_running(netdev)) { + u32 err = e1000_request_irq(adapter); + + if (err) + return err; + + e1000e_up(adapter); + } + + netif_device_attach(netdev); + + return 0; +} + #ifdef CONFIG_PM_SLEEP -static int e1000_suspend(struct device *dev) +static int e1000e_pm_suspend(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); + e1000e_pm_freeze(dev); + return __e1000_shutdown(pdev, false); } -static int e1000_resume(struct device *dev) +static int e1000e_pm_resume(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); - struct net_device *netdev = pci_get_drvdata(pdev); - struct e1000_adapter *adapter = netdev_priv(netdev); + int rc; - if (e1000e_pm_ready(adapter)) - adapter->idle_check = true; + rc = __e1000_resume(pdev); + if (rc) + return rc; - return __e1000_resume(pdev); + return e1000e_pm_thaw(dev); } #endif /* CONFIG_PM_SLEEP */ #ifdef CONFIG_PM_RUNTIME -static int e1000_runtime_suspend(struct device *dev) +static int e1000e_pm_runtime_idle(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct net_device *netdev = pci_get_drvdata(pdev); struct e1000_adapter *adapter = netdev_priv(netdev); - if (!e1000e_pm_ready(adapter)) - return 0; + if (!e1000e_has_link(adapter)) + pm_schedule_suspend(dev, 5 * MSEC_PER_SEC); - return __e1000_shutdown(pdev, true); + return -EBUSY; } -static int e1000_idle(struct device *dev) +static int e1000e_pm_runtime_resume(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct net_device *netdev = pci_get_drvdata(pdev); struct e1000_adapter *adapter = netdev_priv(netdev); + int rc; - if (!e1000e_pm_ready(adapter)) - return 0; + rc = __e1000_resume(pdev); + if (rc) + return rc; - if (adapter->idle_check) { - adapter->idle_check = false; - if (!e1000e_has_link(adapter)) - pm_schedule_suspend(dev, MSEC_PER_SEC); - } + if (netdev->flags & IFF_UP) + rc = e1000e_up(adapter); - return -EBUSY; + return rc; } -static int e1000_runtime_resume(struct device *dev) +static int e1000e_pm_runtime_suspend(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct net_device *netdev = pci_get_drvdata(pdev); struct e1000_adapter *adapter = netdev_priv(netdev); - if (!e1000e_pm_ready(adapter)) - return 0; + if (netdev->flags & IFF_UP) { + int count = E1000_CHECK_RESET_COUNT; + + while (test_bit(__E1000_RESETTING, &adapter->state) && count--) + usleep_range(10000, 20000); + + WARN_ON(test_bit(__E1000_RESETTING, &adapter->state)); + + /* Down the device without resetting the hardware */ + e1000e_down(adapter, false); + } - adapter->idle_check = !dev->power.runtime_auto; - return __e1000_resume(pdev); + if (__e1000_shutdown(pdev, true)) { + e1000e_pm_runtime_resume(dev); + return -EBUSY; + } + + return 0; } #endif /* CONFIG_PM_RUNTIME */ #endif /* CONFIG_PM */ static void e1000_shutdown(struct pci_dev *pdev) { + e1000e_pm_freeze(&pdev->dev); + __e1000_shutdown(pdev, false); } @@ -6338,7 +6427,7 @@ static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev, return PCI_ERS_RESULT_DISCONNECT; if (netif_running(netdev)) - e1000e_down(adapter); + e1000e_down(adapter, true); pci_disable_device(pdev); /* Request a slot reset.
*/ @@ -6350,7 +6439,7 @@ static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev, * @pdev: Pointer to PCI device * * Restart the card from scratch, as if from a cold-boot. Implementation - * resembles the first-half of the e1000_resume routine. + * resembles the first-half of the e1000e_pm_resume routine. */ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev) { @@ -6397,7 +6486,7 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev) * * This callback is called when the error recovery driver tells us that * its OK to resume normal operation. Implementation resembles the - * second-half of the e1000_resume routine. + * second-half of the e1000e_pm_resume routine. */ static void e1000_io_resume(struct pci_dev *pdev) { @@ -6902,9 +6991,6 @@ static void e1000_remove(struct pci_dev *pdev) } } - if (!(netdev->flags & IFF_UP)) - e1000_power_down_phy(adapter); - /* Don't lie to e1000_close() down the road. */ if (!down) clear_bit(__E1000_DOWN, &adapter->state); @@ -7026,9 +7112,16 @@ static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = { MODULE_DEVICE_TABLE(pci, e1000_pci_tbl); static const struct dev_pm_ops e1000_pm_ops = { - SET_SYSTEM_SLEEP_PM_OPS(e1000_suspend, e1000_resume) - SET_RUNTIME_PM_OPS(e1000_runtime_suspend, e1000_runtime_resume, - e1000_idle) +#ifdef CONFIG_PM_SLEEP + .suspend = e1000e_pm_suspend, + .resume = e1000e_pm_resume, + .freeze = e1000e_pm_freeze, + .thaw = e1000e_pm_thaw, + .poweroff = e1000e_pm_suspend, + .restore = e1000e_pm_resume, +#endif + SET_RUNTIME_PM_OPS(e1000e_pm_runtime_suspend, e1000e_pm_runtime_resume, + e1000e_pm_runtime_idle) }; /* PCI Device API Driver */ @@ -7055,7 +7148,7 @@ static int __init e1000_init_module(void) int ret; pr_info("Intel(R) PRO/1000 Network Driver - %s\n", e1000e_driver_version); - pr_info("Copyright(c) 1999 - 2013 Intel Corporation.\n"); + pr_info("Copyright(c) 1999 - 2014 Intel Corporation.\n"); ret = pci_register_driver(&e1000_driver); return ret; diff --git a/drivers/net/ethernet/intel/e1000e/nvm.c b/drivers/net/ethernet/intel/e1000e/nvm.c index d70a03906ac0..a9a976f04bff 100644 --- a/drivers/net/ethernet/intel/e1000e/nvm.c +++ b/drivers/net/ethernet/intel/e1000e/nvm.c @@ -1,30 +1,23 @@ -/******************************************************************************* - - Intel PRO/1000 Linux driver - Copyright(c) 1999 - 2013 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS <linux.nics@intel.com> - e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +/* Intel PRO/1000 Linux driver + * Copyright(c) 1999 - 2014 Intel Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * Linux NICS <linux.nics@intel.com> + * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + */ #include "e1000.h" diff --git a/drivers/net/ethernet/intel/e1000e/nvm.h b/drivers/net/ethernet/intel/e1000e/nvm.h index 45fc69561627..342bf69efab5 100644 --- a/drivers/net/ethernet/intel/e1000e/nvm.h +++ b/drivers/net/ethernet/intel/e1000e/nvm.h @@ -1,30 +1,23 @@ -/******************************************************************************* - - Intel PRO/1000 Linux driver - Copyright(c) 1999 - 2013 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS <linux.nics@intel.com> - e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +/* Intel PRO/1000 Linux driver + * Copyright(c) 1999 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * Linux NICS <linux.nics@intel.com> + * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + */ #ifndef _E1000E_NVM_H_ #define _E1000E_NVM_H_ diff --git a/drivers/net/ethernet/intel/e1000e/param.c b/drivers/net/ethernet/intel/e1000e/param.c index c16bd75b6caa..d0ac0f3249c8 100644 --- a/drivers/net/ethernet/intel/e1000e/param.c +++ b/drivers/net/ethernet/intel/e1000e/param.c @@ -1,30 +1,23 @@ -/******************************************************************************* - - Intel PRO/1000 Linux driver - Copyright(c) 1999 - 2013 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS <linux.nics@intel.com> - e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +/* Intel PRO/1000 Linux driver + * Copyright(c) 1999 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * Linux NICS <linux.nics@intel.com> + * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + */ #include <linux/netdevice.h> #include <linux/module.h> @@ -381,6 +374,12 @@ void e1000e_check_options(struct e1000_adapter *adapter) "%s set to dynamic mode\n", opt.name); adapter->itr = 20000; break; + case 2: + dev_info(&adapter->pdev->dev, + "%s Invalid mode - setting default\n", + opt.name); + adapter->itr_setting = opt.def; + /* fall-through */ case 3: dev_info(&adapter->pdev->dev, "%s set to dynamic conservative mode\n", diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c index 20e71f4ca426..00b3fc98bf30 100644 --- a/drivers/net/ethernet/intel/e1000e/phy.c +++ b/drivers/net/ethernet/intel/e1000e/phy.c @@ -1,30 +1,23 @@ -/******************************************************************************* - - Intel PRO/1000 Linux driver - Copyright(c) 1999 - 2013 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. 
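The param.c hunk above closes a gap in InterruptThrottleRate handling: mode 2 is not a supported setting, so it is now reported and coerced to the default before falling through into the mode-3 (dynamic conservative) message. A sketch of the resulting mapping, assuming the default is mode 3 as the fall-through implies (helper name is illustrative):

static u32 normalize_itr_mode(u32 itr, u32 def)
{
	switch (itr) {
	case 2:		/* invalid mode: coerce to the default */
		itr = def;
		/* fall through */
	case 3:		/* dynamic conservative moderation */
		break;
	default:	/* other supported modes or a literal rate */
		break;
	}

	return itr;
}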
- - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS <linux.nics@intel.com> - e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +/* Intel PRO/1000 Linux driver + * Copyright(c) 1999 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * Linux NICS <linux.nics@intel.com> + * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + */ #include "e1000.h" diff --git a/drivers/net/ethernet/intel/e1000e/phy.h b/drivers/net/ethernet/intel/e1000e/phy.h index f4f71b9991e3..3841bccf058c 100644 --- a/drivers/net/ethernet/intel/e1000e/phy.h +++ b/drivers/net/ethernet/intel/e1000e/phy.h @@ -1,30 +1,23 @@ -/******************************************************************************* - - Intel PRO/1000 Linux driver - Copyright(c) 1999 - 2013 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS <linux.nics@intel.com> - e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +/* Intel PRO/1000 Linux driver + * Copyright(c) 1999 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * Linux NICS <linux.nics@intel.com> + * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + */ #ifndef _E1000E_PHY_H_ #define _E1000E_PHY_H_ diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c index 065f8c80d4f2..fb1a914a3ad4 100644 --- a/drivers/net/ethernet/intel/e1000e/ptp.c +++ b/drivers/net/ethernet/intel/e1000e/ptp.c @@ -1,30 +1,23 @@ -/******************************************************************************* - - Intel PRO/1000 Linux driver - Copyright(c) 1999 - 2013 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS <linux.nics@intel.com> - e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +/* Intel PRO/1000 Linux driver + * Copyright(c) 1999 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * Linux NICS <linux.nics@intel.com> + * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + */ /* PTP 1588 Hardware Clock (PHC) * Derived from PTP Hardware Clock driver for Intel 82576 and 82580 (igb) @@ -47,6 +40,7 @@ static int e1000e_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta) ptp_clock_info); struct e1000_hw *hw = &adapter->hw; bool neg_adj = false; + unsigned long flags; u64 adjustment; u32 timinca, incvalue; s32 ret_val; @@ -64,6 +58,8 @@ static int e1000e_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta) if (ret_val) return ret_val; + spin_lock_irqsave(&adapter->systim_lock, flags); + incvalue = timinca & E1000_TIMINCA_INCVALUE_MASK; adjustment = incvalue; @@ -77,6 +73,8 @@ static int e1000e_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta) ew32(TIMINCA, timinca); + spin_unlock_irqrestore(&adapter->systim_lock, flags); + return 0; } @@ -191,6 +189,7 @@ static const struct ptp_clock_info e1000e_ptp_clock_info = { .n_alarm = 0, .n_ext_ts = 0, .n_per_out = 0, + .n_pins = 0, .pps = 0, .adjfreq = e1000e_phc_adjfreq, .adjtime = e1000e_phc_adjtime, diff --git a/drivers/net/ethernet/intel/e1000e/regs.h b/drivers/net/ethernet/intel/e1000e/regs.h index a7e6a3e37257..ea235bbe50d3 100644 --- a/drivers/net/ethernet/intel/e1000e/regs.h +++ b/drivers/net/ethernet/intel/e1000e/regs.h @@ -1,30 +1,23 @@ -/******************************************************************************* - - Intel PRO/1000 Linux driver - Copyright(c) 1999 - 2013 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS <linux.nics@intel.com> - e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +/* Intel PRO/1000 Linux driver + * Copyright(c) 1999 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * Linux NICS <linux.nics@intel.com> + * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + * Intel Corporation, 5200 N.E. 
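Two things happen in the ptp.c hunk above: the TIMINCA read-modify-write is now serialized against the rest of the SYSTIM handling by taking adapter->systim_lock with interrupts disabled, and the base increment is scaled by the requested frequency offset. A minimal sketch of the parts-per-billion scaling step, assuming div_u64() from linux/math64.h (illustrative, not the driver's literal arithmetic):

#include <linux/math64.h>

/* Scale a base timer increment by |delta| parts per billion. */
static u64 ppb_adjustment(u32 incvalue, s32 delta)
{
	u64 adj = (u64)incvalue * abs(delta);

	return div_u64(adj, 1000000000);
}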
Elam Young Parkway, Hillsboro, OR 97124-6497 + */ #ifndef _E1000E_REGS_H_ #define _E1000E_REGS_H_ @@ -39,6 +32,7 @@ #define E1000_SCTL 0x00024 /* SerDes Control - RW */ #define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */ #define E1000_FCAH 0x0002C /* Flow Control Address High -RW */ +#define E1000_FEXT 0x0002C /* Future Extended - RW */ #define E1000_FEXTNVM 0x00028 /* Future Extended NVM - RW */ #define E1000_FEXTNVM3 0x0003C /* Future Extended NVM 3 - RW */ #define E1000_FEXTNVM4 0x00024 /* Future Extended NVM 4 - RW */ diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index 72dae4d97b43..beb7b4393a6c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -86,12 +86,12 @@ #define I40E_NVM_VERSION_LO_SHIFT 0 #define I40E_NVM_VERSION_LO_MASK (0xff << I40E_NVM_VERSION_LO_SHIFT) -#define I40E_NVM_VERSION_HI_SHIFT 8 -#define I40E_NVM_VERSION_HI_MASK (0xff << I40E_NVM_VERSION_HI_SHIFT) +#define I40E_NVM_VERSION_HI_SHIFT 12 +#define I40E_NVM_VERSION_HI_MASK (0xf << I40E_NVM_VERSION_HI_SHIFT) /* The values in here are decimal coded as hex as is the case in the NVM map*/ #define I40E_CURRENT_NVM_VERSION_HI 0x2 -#define I40E_CURRENT_NVM_VERSION_LO 0x30 +#define I40E_CURRENT_NVM_VERSION_LO 0x40 /* magic for getting defines into strings */ #define STRINGIFY(foo) #foo @@ -136,6 +136,7 @@ enum i40e_state_t { __I40E_EMP_RESET_REQUESTED, __I40E_FILTER_OVERFLOW_PROMISC, __I40E_SUSPENDED, + __I40E_BAD_EEPROM, }; enum i40e_interrupt_policy { @@ -152,8 +153,21 @@ struct i40e_lump_tracking { }; #define I40E_DEFAULT_ATR_SAMPLE_RATE 20 -#define I40E_FDIR_MAX_RAW_PACKET_LOOKUP 512 -struct i40e_fdir_data { +#define I40E_FDIR_MAX_RAW_PACKET_SIZE 512 +#define I40E_FDIR_BUFFER_FULL_MARGIN 10 +#define I40E_FDIR_BUFFER_HEAD_ROOM 200 + +struct i40e_fdir_filter { + struct hlist_node fdir_node; + /* filter input set */ + u8 flow_type; + u8 ip4_proto; + __be32 dst_ip[4]; + __be32 src_ip[4]; + __be16 src_port; + __be16 dst_port; + __be32 sctp_v_tag; + /* filter control */ u16 q_index; u8 flex_off; u8 pctype; @@ -162,7 +176,6 @@ struct i40e_fdir_data { u8 fd_status; u16 cnt_index; u32 fd_id; - u8 *raw_packet; }; #define I40E_ETH_P_LLDP 0x88cc @@ -196,7 +209,7 @@ struct i40e_pf { bool fc_autoneg_status; u16 eeprom_version; - u16 num_vmdq_vsis; /* num vmdq pools this pf has set up */ + u16 num_vmdq_vsis; /* num vmdq vsis this pf has set up */ u16 num_vmdq_qps; /* num queue pairs per vmdq pool */ u16 num_vmdq_msix; /* num queue vectors per vmdq pool */ u16 num_req_vfs; /* num vfs requested for this vf */ @@ -210,6 +223,9 @@ struct i40e_pf { u8 atr_sample_rate; bool wol_en; + struct hlist_head fdir_filter_list; + u16 fdir_pf_active_filters; + #ifdef CONFIG_I40E_VXLAN __be16 vxlan_ports[I40E_MAX_PF_UDP_OFFLOAD_PORTS]; u16 pending_vxlan_bitmap; @@ -251,6 +267,9 @@ struct i40e_pf { #define I40E_FLAG_VXLAN_FILTER_SYNC (u64)(1 << 27) #endif + /* tracks features that get auto disabled by errors */ + u64 auto_disable_flags; + bool stat_offsets_loaded; struct i40e_hw_port_stats stats; struct i40e_hw_port_stats stats_offsets; @@ -477,10 +496,10 @@ static inline char *i40e_fw_version_str(struct i40e_hw *hw) "f%d.%d a%d.%d n%02x.%02x e%08x", hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.api_maj_ver, hw->aq.api_min_ver, - (hw->nvm.version & I40E_NVM_VERSION_HI_MASK) - >> I40E_NVM_VERSION_HI_SHIFT, - (hw->nvm.version & I40E_NVM_VERSION_LO_MASK) - >> I40E_NVM_VERSION_LO_SHIFT, + (hw->nvm.version & I40E_NVM_VERSION_HI_MASK) >>
I40E_NVM_VERSION_HI_SHIFT, + (hw->nvm.version & I40E_NVM_VERSION_LO_MASK) >> + I40E_NVM_VERSION_LO_SHIFT, hw->nvm.eetrack); return buf; @@ -534,9 +553,13 @@ struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi); int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig); -int i40e_program_fdir_filter(struct i40e_fdir_data *fdir_data, +int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet, struct i40e_pf *pf, bool add); - +int i40e_add_del_fdir(struct i40e_vsi *vsi, + struct i40e_fdir_filter *input, bool add); +void i40e_fdir_check_and_reenable(struct i40e_pf *pf); +int i40e_get_current_fd_count(struct i40e_pf *pf); +bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features); void i40e_set_ethtool_ops(struct net_device *netdev); struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi, u8 *macaddr, s16 vlan, @@ -575,6 +598,7 @@ void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector); void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf); void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf); int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd); +int i40e_vsi_open(struct i40e_vsi *vsi); void i40e_vlan_stripping_disable(struct i40e_vsi *vsi); int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid); int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid); diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c index a50e6b3479ae..ed3902bf249b 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c @@ -647,9 +647,8 @@ static u16 i40e_clean_asq(struct i40e_hw *hw) desc_cb = *desc; cb_func(hw, &desc_cb); } - memset((void *)desc, 0, sizeof(struct i40e_aq_desc)); - memset((void *)details, 0, - sizeof(struct i40e_asq_cmd_details)); + memset(desc, 0, sizeof(*desc)); + memset(details, 0, sizeof(*details)); ntc++; if (ntc == asq->count) ntc = 0; diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index e7f38b57834d..922cdcc45c54 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -162,6 +162,372 @@ i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw, return status; } +/* The i40e_ptype_lookup table is used to convert from the 8-bit ptype in the + * hardware to a bit-field that can be used by SW to more easily determine the + * packet type. + * + * Macros are used to shorten the table lines and make this table human + * readable. + * + * We store the PTYPE in the top byte of the bit field - this is just so that + * we can check that the table doesn't have a row missing, as the index into + * the table should be the PTYPE. 
+ * + * Typical work flow: + * + * IF NOT i40e_ptype_lookup[ptype].known + * THEN + * Packet is unknown + * ELSE IF i40e_ptype_lookup[ptype].outer_ip == I40E_RX_PTYPE_OUTER_IP + * Use the rest of the fields to look at the tunnels, inner protocols, etc + * ELSE + * Use the enum i40e_rx_l2_ptype to decode the packet type + * ENDIF + */ + +/* macro to make the table lines short */ +#define I40E_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\ + { PTYPE, \ + 1, \ + I40E_RX_PTYPE_OUTER_##OUTER_IP, \ + I40E_RX_PTYPE_OUTER_##OUTER_IP_VER, \ + I40E_RX_PTYPE_##OUTER_FRAG, \ + I40E_RX_PTYPE_TUNNEL_##T, \ + I40E_RX_PTYPE_TUNNEL_END_##TE, \ + I40E_RX_PTYPE_##TEF, \ + I40E_RX_PTYPE_INNER_PROT_##I, \ + I40E_RX_PTYPE_PAYLOAD_LAYER_##PL } + +#define I40E_PTT_UNUSED_ENTRY(PTYPE) \ + { PTYPE, 0, 0, 0, 0, 0, 0, 0, 0, 0 } + +/* shorter macros makes the table fit but are terse */ +#define I40E_RX_PTYPE_NOF I40E_RX_PTYPE_NOT_FRAG +#define I40E_RX_PTYPE_FRG I40E_RX_PTYPE_FRAG +#define I40E_RX_PTYPE_INNER_PROT_TS I40E_RX_PTYPE_INNER_PROT_TIMESYNC + +/* Lookup table mapping the HW PTYPE to the bit field for decoding */ +struct i40e_rx_ptype_decoded i40e_ptype_lookup[] = { + /* L2 Packet types */ + I40E_PTT_UNUSED_ENTRY(0), + I40E_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), + I40E_PTT(2, L2, NONE, NOF, NONE, NONE, NOF, TS, PAY2), + I40E_PTT(3, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), + I40E_PTT_UNUSED_ENTRY(4), + I40E_PTT_UNUSED_ENTRY(5), + I40E_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), + I40E_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), + I40E_PTT_UNUSED_ENTRY(8), + I40E_PTT_UNUSED_ENTRY(9), + I40E_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), + I40E_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), + I40E_PTT(12, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(13, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(14, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(15, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(16, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(17, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(18, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(19, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(20, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(21, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + + /* Non Tunneled IPv4 */ + I40E_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(25), + I40E_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4), + I40E_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4), + I40E_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4), + + /* IPv4 --> IPv4 */ + I40E_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3), + I40E_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3), + I40E_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(32), + I40E_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4), + I40E_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4), + I40E_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4), + + /* IPv4 --> IPv6 */ + I40E_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3), + I40E_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3), + I40E_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(39), + I40E_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4), + I40E_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4), + 
I40E_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4), + + /* IPv4 --> GRE/NAT */ + I40E_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3), + + /* IPv4 --> GRE/NAT --> IPv4 */ + I40E_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3), + I40E_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3), + I40E_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(47), + I40E_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4), + I40E_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4), + I40E_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4), + + /* IPv4 --> GRE/NAT --> IPv6 */ + I40E_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3), + I40E_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3), + I40E_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(54), + I40E_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4), + I40E_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4), + I40E_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4), + + /* IPv4 --> GRE/NAT --> MAC */ + I40E_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3), + + /* IPv4 --> GRE/NAT --> MAC --> IPv4 */ + I40E_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3), + I40E_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3), + I40E_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(62), + I40E_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4), + I40E_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4), + I40E_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4), + + /* IPv4 --> GRE/NAT -> MAC --> IPv6 */ + I40E_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3), + I40E_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3), + I40E_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(69), + I40E_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4), + I40E_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4), + I40E_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4), + + /* IPv4 --> GRE/NAT --> MAC/VLAN */ + I40E_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3), + + /* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */ + I40E_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3), + I40E_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3), + I40E_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(77), + I40E_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4), + I40E_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4), + I40E_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4), + + /* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */ + I40E_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3), + I40E_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3), + I40E_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(84), + I40E_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4), + I40E_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4), + I40E_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4), + + /* Non Tunneled IPv6 */ + I40E_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY3), + I40E_PTT_UNUSED_ENTRY(91), + I40E_PTT(92, IP, IPV6, NOF, NONE, 
NONE, NOF, TCP, PAY4), + I40E_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4), + I40E_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4), + + /* IPv6 --> IPv4 */ + I40E_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3), + I40E_PTT(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3), + I40E_PTT(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(98), + I40E_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4), + I40E_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4), + I40E_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4), + + /* IPv6 --> IPv6 */ + I40E_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3), + I40E_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3), + I40E_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(105), + I40E_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4), + I40E_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4), + I40E_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4), + + /* IPv6 --> GRE/NAT */ + I40E_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3), + + /* IPv6 --> GRE/NAT -> IPv4 */ + I40E_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3), + I40E_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3), + I40E_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(113), + I40E_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4), + I40E_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4), + I40E_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4), + + /* IPv6 --> GRE/NAT -> IPv6 */ + I40E_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3), + I40E_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3), + I40E_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(120), + I40E_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4), + I40E_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4), + I40E_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4), + + /* IPv6 --> GRE/NAT -> MAC */ + I40E_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3), + + /* IPv6 --> GRE/NAT -> MAC -> IPv4 */ + I40E_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3), + I40E_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3), + I40E_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(128), + I40E_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4), + I40E_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4), + I40E_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4), + + /* IPv6 --> GRE/NAT -> MAC -> IPv6 */ + I40E_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3), + I40E_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3), + I40E_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(135), + I40E_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4), + I40E_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4), + I40E_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4), + + /* IPv6 --> GRE/NAT -> MAC/VLAN */ + I40E_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3), + + /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */ + I40E_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3), + I40E_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3), + I40E_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(143), + I40E_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, 
NOF, TCP, PAY4), + I40E_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4), + I40E_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4), + + /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */ + I40E_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3), + I40E_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3), + I40E_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(150), + I40E_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4), + I40E_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4), + I40E_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4), + + /* unused entries */ + I40E_PTT_UNUSED_ENTRY(154), + I40E_PTT_UNUSED_ENTRY(155), + I40E_PTT_UNUSED_ENTRY(156), + I40E_PTT_UNUSED_ENTRY(157), + I40E_PTT_UNUSED_ENTRY(158), + I40E_PTT_UNUSED_ENTRY(159), + + I40E_PTT_UNUSED_ENTRY(160), + I40E_PTT_UNUSED_ENTRY(161), + I40E_PTT_UNUSED_ENTRY(162), + I40E_PTT_UNUSED_ENTRY(163), + I40E_PTT_UNUSED_ENTRY(164), + I40E_PTT_UNUSED_ENTRY(165), + I40E_PTT_UNUSED_ENTRY(166), + I40E_PTT_UNUSED_ENTRY(167), + I40E_PTT_UNUSED_ENTRY(168), + I40E_PTT_UNUSED_ENTRY(169), + + I40E_PTT_UNUSED_ENTRY(170), + I40E_PTT_UNUSED_ENTRY(171), + I40E_PTT_UNUSED_ENTRY(172), + I40E_PTT_UNUSED_ENTRY(173), + I40E_PTT_UNUSED_ENTRY(174), + I40E_PTT_UNUSED_ENTRY(175), + I40E_PTT_UNUSED_ENTRY(176), + I40E_PTT_UNUSED_ENTRY(177), + I40E_PTT_UNUSED_ENTRY(178), + I40E_PTT_UNUSED_ENTRY(179), + + I40E_PTT_UNUSED_ENTRY(180), + I40E_PTT_UNUSED_ENTRY(181), + I40E_PTT_UNUSED_ENTRY(182), + I40E_PTT_UNUSED_ENTRY(183), + I40E_PTT_UNUSED_ENTRY(184), + I40E_PTT_UNUSED_ENTRY(185), + I40E_PTT_UNUSED_ENTRY(186), + I40E_PTT_UNUSED_ENTRY(187), + I40E_PTT_UNUSED_ENTRY(188), + I40E_PTT_UNUSED_ENTRY(189), + + I40E_PTT_UNUSED_ENTRY(190), + I40E_PTT_UNUSED_ENTRY(191), + I40E_PTT_UNUSED_ENTRY(192), + I40E_PTT_UNUSED_ENTRY(193), + I40E_PTT_UNUSED_ENTRY(194), + I40E_PTT_UNUSED_ENTRY(195), + I40E_PTT_UNUSED_ENTRY(196), + I40E_PTT_UNUSED_ENTRY(197), + I40E_PTT_UNUSED_ENTRY(198), + I40E_PTT_UNUSED_ENTRY(199), + + I40E_PTT_UNUSED_ENTRY(200), + I40E_PTT_UNUSED_ENTRY(201), + I40E_PTT_UNUSED_ENTRY(202), + I40E_PTT_UNUSED_ENTRY(203), + I40E_PTT_UNUSED_ENTRY(204), + I40E_PTT_UNUSED_ENTRY(205), + I40E_PTT_UNUSED_ENTRY(206), + I40E_PTT_UNUSED_ENTRY(207), + I40E_PTT_UNUSED_ENTRY(208), + I40E_PTT_UNUSED_ENTRY(209), + + I40E_PTT_UNUSED_ENTRY(210), + I40E_PTT_UNUSED_ENTRY(211), + I40E_PTT_UNUSED_ENTRY(212), + I40E_PTT_UNUSED_ENTRY(213), + I40E_PTT_UNUSED_ENTRY(214), + I40E_PTT_UNUSED_ENTRY(215), + I40E_PTT_UNUSED_ENTRY(216), + I40E_PTT_UNUSED_ENTRY(217), + I40E_PTT_UNUSED_ENTRY(218), + I40E_PTT_UNUSED_ENTRY(219), + + I40E_PTT_UNUSED_ENTRY(220), + I40E_PTT_UNUSED_ENTRY(221), + I40E_PTT_UNUSED_ENTRY(222), + I40E_PTT_UNUSED_ENTRY(223), + I40E_PTT_UNUSED_ENTRY(224), + I40E_PTT_UNUSED_ENTRY(225), + I40E_PTT_UNUSED_ENTRY(226), + I40E_PTT_UNUSED_ENTRY(227), + I40E_PTT_UNUSED_ENTRY(228), + I40E_PTT_UNUSED_ENTRY(229), + + I40E_PTT_UNUSED_ENTRY(230), + I40E_PTT_UNUSED_ENTRY(231), + I40E_PTT_UNUSED_ENTRY(232), + I40E_PTT_UNUSED_ENTRY(233), + I40E_PTT_UNUSED_ENTRY(234), + I40E_PTT_UNUSED_ENTRY(235), + I40E_PTT_UNUSED_ENTRY(236), + I40E_PTT_UNUSED_ENTRY(237), + I40E_PTT_UNUSED_ENTRY(238), + I40E_PTT_UNUSED_ENTRY(239), + + I40E_PTT_UNUSED_ENTRY(240), + I40E_PTT_UNUSED_ENTRY(241), + I40E_PTT_UNUSED_ENTRY(242), + I40E_PTT_UNUSED_ENTRY(243), + I40E_PTT_UNUSED_ENTRY(244), + I40E_PTT_UNUSED_ENTRY(245), + I40E_PTT_UNUSED_ENTRY(246), + 
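/* The 8-bit ptype in each Rx descriptor indexes straight into this
+ * 256-entry table; I40E_PTT_UNUSED_ENTRY slots decode to an all-zero
+ * (unknown) classification. A minimal consumer of the table would be
+ * a lookup of the following shape (a sketch; the driver's own accessor
+ * may differ in name and location):
+ *
+ *	static inline struct i40e_rx_ptype_decoded
+ *	decode_rx_desc_ptype(u8 ptype)
+ *	{
+ *		return i40e_ptype_lookup[ptype];
+ *	}
+ *
+ * so Rx cleanup can branch on the decoded outer_ip/inner_prot fields
+ * instead of re-parsing packet headers.
+ */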
I40E_PTT_UNUSED_ENTRY(247), + I40E_PTT_UNUSED_ENTRY(248), + I40E_PTT_UNUSED_ENTRY(249), + + I40E_PTT_UNUSED_ENTRY(250), + I40E_PTT_UNUSED_ENTRY(251), + I40E_PTT_UNUSED_ENTRY(252), + I40E_PTT_UNUSED_ENTRY(253), + I40E_PTT_UNUSED_ENTRY(254), + I40E_PTT_UNUSED_ENTRY(255) +}; + + /** * i40e_init_shared_code - Initialize the shared code * @hw: pointer to hardware structure @@ -1409,9 +1775,9 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff, cap = (struct i40e_aqc_list_capabilities_element_resp *) buff; if (list_type_opc == i40e_aqc_opc_list_dev_capabilities) - p = (struct i40e_hw_capabilities *)&hw->dev_caps; + p = &hw->dev_caps; else if (list_type_opc == i40e_aqc_opc_list_func_capabilities) - p = (struct i40e_hw_capabilities *)&hw->func_caps; + p = &hw->func_caps; else return; diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.c b/drivers/net/ethernet/intel/i40e/i40e_dcb.c index 50730141bb7b..036570d76176 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_dcb.c +++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.c @@ -332,6 +332,7 @@ i40e_status i40e_lldp_to_dcb_config(u8 *lldpmib, u16 type; u16 length; u16 typelength; + u16 offset = 0; if (!lldpmib || !dcbcfg) return I40E_ERR_PARAM; @@ -339,15 +340,17 @@ i40e_status i40e_lldp_to_dcb_config(u8 *lldpmib, /* set to the start of LLDPDU */ lldpmib += ETH_HLEN; tlv = (struct i40e_lldp_org_tlv *)lldpmib; - while (tlv) { + while (1) { typelength = ntohs(tlv->typelength); type = (u16)((typelength & I40E_LLDP_TLV_TYPE_MASK) >> I40E_LLDP_TLV_TYPE_SHIFT); length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >> I40E_LLDP_TLV_LEN_SHIFT); + offset += sizeof(typelength) + length; - if (type == I40E_TLV_TYPE_END) - break;/* END TLV break out */ + /* END TLV or beyond LLDPDU size */ + if ((type == I40E_TLV_TYPE_END) || (offset > I40E_LLDPDU_SIZE)) + break; switch (type) { case I40E_TLV_TYPE_ORG: diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index da22c3fa2c00..3c37386fd138 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c @@ -1011,10 +1011,12 @@ static void i40e_dbg_dump_veb_all(struct i40e_pf *pf) **/ static void i40e_dbg_cmd_fd_ctrl(struct i40e_pf *pf, u64 flag, bool enable) { - if (enable) + if (enable) { pf->flags |= flag; - else + } else { pf->flags &= ~flag; + pf->auto_disable_flags |= flag; + } dev_info(&pf->pdev->dev, "requesting a pf reset\n"); i40e_do_reset_safe(pf, (1 << __I40E_PF_RESET_REQUESTED)); } @@ -1467,19 +1469,19 @@ static ssize_t i40e_dbg_command_write(struct file *filp, pf->msg_enable); } } else if (strncmp(cmd_buf, "pfr", 3) == 0) { - dev_info(&pf->pdev->dev, "forcing PFR\n"); + dev_info(&pf->pdev->dev, "debugfs: forcing PFR\n"); i40e_do_reset_safe(pf, (1 << __I40E_PF_RESET_REQUESTED)); } else if (strncmp(cmd_buf, "corer", 5) == 0) { - dev_info(&pf->pdev->dev, "forcing CoreR\n"); + dev_info(&pf->pdev->dev, "debugfs: forcing CoreR\n"); i40e_do_reset_safe(pf, (1 << __I40E_CORE_RESET_REQUESTED)); } else if (strncmp(cmd_buf, "globr", 5) == 0) { - dev_info(&pf->pdev->dev, "forcing GlobR\n"); + dev_info(&pf->pdev->dev, "debugfs: forcing GlobR\n"); i40e_do_reset_safe(pf, (1 << __I40E_GLOBAL_RESET_REQUESTED)); } else if (strncmp(cmd_buf, "empr", 4) == 0) { - dev_info(&pf->pdev->dev, "forcing EMPR\n"); + dev_info(&pf->pdev->dev, "debugfs: forcing EMPR\n"); i40e_do_reset_safe(pf, (1 << __I40E_EMP_RESET_REQUESTED)); } else if (strncmp(cmd_buf, "read", 4) == 0) { @@ -1663,28 +1665,36 @@ static 
ssize_t i40e_dbg_command_write(struct file *filp, desc = NULL; } else if ((strncmp(cmd_buf, "add fd_filter", 13) == 0) || (strncmp(cmd_buf, "rem fd_filter", 13) == 0)) { - struct i40e_fdir_data fd_data; + struct i40e_fdir_filter fd_data; u16 packet_len, i, j = 0; char *asc_packet; + u8 *raw_packet; bool add = false; int ret; - asc_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_LOOKUP, + if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED)) + goto command_write_done; + + if (strncmp(cmd_buf, "add", 3) == 0) + add = true; + + if (add && (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) + goto command_write_done; + + asc_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL); if (!asc_packet) goto command_write_done; - fd_data.raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_LOOKUP, - GFP_KERNEL); + raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, + GFP_KERNEL); - if (!fd_data.raw_packet) { + if (!raw_packet) { kfree(asc_packet); asc_packet = NULL; goto command_write_done; } - if (strncmp(cmd_buf, "add", 3) == 0) - add = true; cnt = sscanf(&cmd_buf[13], "%hx %2hhx %2hhx %hx %2hhx %2hhx %hx %x %hd %511s", &fd_data.q_index, @@ -1698,36 +1708,36 @@ static ssize_t i40e_dbg_command_write(struct file *filp, cnt); kfree(asc_packet); asc_packet = NULL; - kfree(fd_data.raw_packet); + kfree(raw_packet); goto command_write_done; } /* fix packet length if user entered 0 */ if (packet_len == 0) - packet_len = I40E_FDIR_MAX_RAW_PACKET_LOOKUP; + packet_len = I40E_FDIR_MAX_RAW_PACKET_SIZE; /* make sure to check the max as well */ packet_len = min_t(u16, - packet_len, I40E_FDIR_MAX_RAW_PACKET_LOOKUP); + packet_len, I40E_FDIR_MAX_RAW_PACKET_SIZE); for (i = 0; i < packet_len; i++) { sscanf(&asc_packet[j], "%2hhx ", - &fd_data.raw_packet[i]); + &raw_packet[i]); j += 3; } dev_info(&pf->pdev->dev, "FD raw packet dump\n"); print_hex_dump(KERN_INFO, "FD raw packet: ", DUMP_PREFIX_OFFSET, 16, 1, - fd_data.raw_packet, packet_len, true); - ret = i40e_program_fdir_filter(&fd_data, pf, add); + raw_packet, packet_len, true); + ret = i40e_program_fdir_filter(&fd_data, raw_packet, pf, add); if (!ret) { dev_info(&pf->pdev->dev, "Filter command send Status : Success\n"); } else { dev_info(&pf->pdev->dev, "Filter command send failed %d\n", ret); } - kfree(fd_data.raw_packet); - fd_data.raw_packet = NULL; + kfree(raw_packet); + raw_packet = NULL; kfree(asc_packet); asc_packet = NULL; } else if (strncmp(cmd_buf, "fd-atr off", 10) == 0) { @@ -2077,9 +2087,13 @@ static ssize_t i40e_dbg_netdev_ops_write(struct file *filp, if (!vsi) { dev_info(&pf->pdev->dev, "tx_timeout: VSI %d not found\n", vsi_seid); - goto netdev_ops_write_done; - } - if (rtnl_trylock()) { + } else if (!vsi->netdev) { + dev_info(&pf->pdev->dev, "tx_timeout: no netdev for VSI %d\n", + vsi_seid); + } else if (test_bit(__I40E_DOWN, &vsi->state)) { + dev_info(&pf->pdev->dev, "tx_timeout: VSI %d not UP\n", + vsi_seid); + } else if (rtnl_trylock()) { vsi->netdev->netdev_ops->ndo_tx_timeout(vsi->netdev); rtnl_unlock(); dev_info(&pf->pdev->dev, "tx_timeout called\n"); @@ -2098,9 +2112,10 @@ static ssize_t i40e_dbg_netdev_ops_write(struct file *filp, if (!vsi) { dev_info(&pf->pdev->dev, "change_mtu: VSI %d not found\n", vsi_seid); - goto netdev_ops_write_done; - } - if (rtnl_trylock()) { + } else if (!vsi->netdev) { + dev_info(&pf->pdev->dev, "change_mtu: no netdev for VSI %d\n", + vsi_seid); + } else if (rtnl_trylock()) { vsi->netdev->netdev_ops->ndo_change_mtu(vsi->netdev, mtu); rtnl_unlock(); @@ -2119,9 +2134,10 @@ static ssize_t i40e_dbg_netdev_ops_write(struct file *filp, 
if (!vsi) { dev_info(&pf->pdev->dev, "set_rx_mode: VSI %d not found\n", vsi_seid); - goto netdev_ops_write_done; - } - if (rtnl_trylock()) { + } else if (!vsi->netdev) { + dev_info(&pf->pdev->dev, "set_rx_mode: no netdev for VSI %d\n", + vsi_seid); + } else if (rtnl_trylock()) { vsi->netdev->netdev_ops->ndo_set_rx_mode(vsi->netdev); rtnl_unlock(); dev_info(&pf->pdev->dev, "set_rx_mode called\n"); @@ -2139,11 +2155,14 @@ static ssize_t i40e_dbg_netdev_ops_write(struct file *filp, if (!vsi) { dev_info(&pf->pdev->dev, "napi: VSI %d not found\n", vsi_seid); - goto netdev_ops_write_done; + } else if (!vsi->netdev) { + dev_info(&pf->pdev->dev, "napi: no netdev for VSI %d\n", + vsi_seid); + } else { + for (i = 0; i < vsi->num_q_vectors; i++) + napi_schedule(&vsi->q_vectors[i]->napi); + dev_info(&pf->pdev->dev, "napi called\n"); } - for (i = 0; i < vsi->num_q_vectors; i++) - napi_schedule(&vsi->q_vectors[i]->napi); - dev_info(&pf->pdev->dev, "napi called\n"); } else { dev_info(&pf->pdev->dev, "unknown command '%s'\n", i40e_dbg_netdev_ops_buf); diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index b1d7d8c5cb9b..03d99cbc5c25 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -62,6 +62,9 @@ static const struct i40e_stats i40e_gstrings_net_stats[] = { I40E_NETDEV_STAT(rx_crc_errors), }; +static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi, + struct ethtool_rxnfc *cmd); + /* These PF_STATs might look like duplicates of some NETDEV_STATs, * but they are separate. This device supports Virtualization, and * as such might have several netdevs supporting VMDq and FCoE going @@ -84,6 +87,7 @@ static struct i40e_stats i40e_gstrings_stats[] = { I40E_PF_STAT("illegal_bytes", stats.illegal_bytes), I40E_PF_STAT("mac_local_faults", stats.mac_local_faults), I40E_PF_STAT("mac_remote_faults", stats.mac_remote_faults), + I40E_PF_STAT("tx_timeout", tx_timeout_count), I40E_PF_STAT("rx_length_errors", stats.rx_length_errors), I40E_PF_STAT("link_xon_rx", stats.link_xon_rx), I40E_PF_STAT("link_xoff_rx", stats.link_xoff_rx), @@ -110,6 +114,11 @@ static struct i40e_stats i40e_gstrings_stats[] = { I40E_PF_STAT("VF_admin_queue_requests", vf_aq_requests), I40E_PF_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts), I40E_PF_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared), + /* LPI stats */ + I40E_PF_STAT("tx_lpi_status", stats.tx_lpi_status), + I40E_PF_STAT("rx_lpi_status", stats.rx_lpi_status), + I40E_PF_STAT("tx_lpi_count", stats.tx_lpi_count), + I40E_PF_STAT("rx_lpi_count", stats.rx_lpi_count), }; #define I40E_QUEUE_STATS_LEN(n) \ @@ -387,7 +396,7 @@ static int i40e_get_eeprom(struct net_device *netdev, ret_val = i40e_aq_read_nvm(hw, 0x0, eeprom->offset + (I40E_NVM_SECTOR_SIZE * i), len, - (u8 *)eeprom_buff + (I40E_NVM_SECTOR_SIZE * i), + eeprom_buff + (I40E_NVM_SECTOR_SIZE * i), last, NULL); if (ret_val) { dev_info(&pf->pdev->dev, @@ -399,7 +408,7 @@ static int i40e_get_eeprom(struct net_device *netdev, release_nvm: i40e_release_nvm(hw); - memcpy(bytes, (u8 *)eeprom_buff, eeprom->len); + memcpy(bytes, eeprom_buff, eeprom->len); free_buff: kfree(eeprom_buff); return ret_val; @@ -649,18 +658,18 @@ static void i40e_get_ethtool_stats(struct net_device *netdev, /* process Tx ring statistics */ do { - start = u64_stats_fetch_begin_bh(&tx_ring->syncp); + start = u64_stats_fetch_begin_irq(&tx_ring->syncp); data[i] = tx_ring->stats.packets; data[i + 1] = tx_ring->stats.bytes; - } while 
(u64_stats_fetch_retry_bh(&tx_ring->syncp, start)); + } while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start)); /* Rx ring is the 2nd half of the queue pair */ rx_ring = &tx_ring[1]; do { - start = u64_stats_fetch_begin_bh(&rx_ring->syncp); + start = u64_stats_fetch_begin_irq(&rx_ring->syncp); data[i + 2] = rx_ring->stats.packets; data[i + 3] = rx_ring->stats.bytes; - } while (u64_stats_fetch_retry_bh(&rx_ring->syncp, start)); + } while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start)); } rcu_read_unlock(); if (vsi == pf->vsi[pf->lan_vsi]) { @@ -1112,6 +1121,84 @@ static int i40e_get_rss_hash_opts(struct i40e_pf *pf, struct ethtool_rxnfc *cmd) } /** + * i40e_get_ethtool_fdir_all - Populates the rule count of a command + * @pf: Pointer to the physical function struct + * @cmd: The command to get or set Rx flow classification rules + * @rule_locs: Array of used rule locations + * + * This function populates both the total and actual rule count of + * the ethtool flow classification command + * + * Returns 0 on success or -EMSGSIZE if entry not found + **/ +static int i40e_get_ethtool_fdir_all(struct i40e_pf *pf, + struct ethtool_rxnfc *cmd, + u32 *rule_locs) +{ + struct i40e_fdir_filter *rule; + struct hlist_node *node2; + int cnt = 0; + + /* report total rule count */ + cmd->data = pf->hw.fdir_shared_filter_count + + pf->fdir_pf_filter_count; + + hlist_for_each_entry_safe(rule, node2, + &pf->fdir_filter_list, fdir_node) { + if (cnt == cmd->rule_cnt) + return -EMSGSIZE; + + rule_locs[cnt] = rule->fd_id; + cnt++; + } + + cmd->rule_cnt = cnt; + + return 0; +} + +/** + * i40e_get_ethtool_fdir_entry - Look up a filter based on Rx flow + * @pf: Pointer to the physical function struct + * @cmd: The command to get or set Rx flow classification rules + * + * This function looks up a filter based on the Rx flow classification + * command and fills the flow spec info for it if found + * + * Returns 0 on success or -EINVAL if filter not found + **/ +static int i40e_get_ethtool_fdir_entry(struct i40e_pf *pf, + struct ethtool_rxnfc *cmd) +{ + struct ethtool_rx_flow_spec *fsp = + (struct ethtool_rx_flow_spec *)&cmd->fs; + struct i40e_fdir_filter *rule = NULL; + struct hlist_node *node2; + + /* report total rule count */ + cmd->data = pf->hw.fdir_shared_filter_count + + pf->fdir_pf_filter_count; + + hlist_for_each_entry_safe(rule, node2, + &pf->fdir_filter_list, fdir_node) { + if (fsp->location <= rule->fd_id) + break; + } + + if (!rule || fsp->location != rule->fd_id) + return -EINVAL; + + fsp->flow_type = rule->flow_type; + fsp->h_u.tcp_ip4_spec.psrc = rule->src_port; + fsp->h_u.tcp_ip4_spec.pdst = rule->dst_port; + fsp->h_u.tcp_ip4_spec.ip4src = rule->src_ip[0]; + fsp->h_u.tcp_ip4_spec.ip4dst = rule->dst_ip[0]; + fsp->ring_cookie = rule->q_index; + + return 0; +} + +/** * i40e_get_rxnfc - command to get RX flow classification rules * @netdev: network interface device structure * @cmd: ethtool rxnfc command @@ -1135,15 +1222,15 @@ static int i40e_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, ret = i40e_get_rss_hash_opts(pf, cmd); break; case ETHTOOL_GRXCLSRLCNT: - cmd->rule_cnt = 10; + cmd->rule_cnt = pf->fdir_pf_active_filters; ret = 0; break; case ETHTOOL_GRXCLSRULE: - ret = 0; + ret = i40e_get_ethtool_fdir_entry(pf, cmd); break; case ETHTOOL_GRXCLSRLALL: - cmd->data = 500; - ret = 0; + ret = i40e_get_ethtool_fdir_all(pf, cmd, rule_locs); + break; default: break; } @@ -1274,289 +1361,182 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc) return 0; 
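	/* With the ETHTOOL_GRXCLSRL* cases above backed by the real filter
	 * list, user space enumerates rules the standard ethtool way: get
	 * the count (GRXCLSRLCNT), then the rule locations (GRXCLSRLALL),
	 * then each rule (GRXCLSRULE). From the shell, with eth0 as a
	 * placeholder device name:
	 *
	 *	ethtool -n eth0		# list ntuple rule locations
	 *	ethtool -n eth0 rule 5	# dump the rule at location 5
	 */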
} -#define IP_HEADER_OFFSET 14 -#define I40E_UDPIP_DUMMY_PACKET_LEN 42 /** - * i40e_add_del_fdir_udpv4 - Add/Remove UDPv4 Flow Director filters for - * a specific flow spec - * @vsi: pointer to the targeted VSI - * @fd_data: the flow director data required from the FDir descriptor - * @ethtool_rx_flow_spec: the flow spec - * @add: true adds a filter, false removes it + * i40e_match_fdir_input_set - Match a new filter against an existing one + * @rule: The filter already added + * @input: The new filter to compare against * - * Returns 0 if the filters were successfully added or removed + * Returns true if the two input sets match **/ -static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi, - struct i40e_fdir_data *fd_data, - struct ethtool_rx_flow_spec *fsp, bool add) +static bool i40e_match_fdir_input_set(struct i40e_fdir_filter *rule, + struct i40e_fdir_filter *input) { - struct i40e_pf *pf = vsi->back; - struct udphdr *udp; - struct iphdr *ip; - bool err = false; - int ret; - int i; - char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0, - 0x45, 0, 0, 0x1c, 0, 0, 0x40, 0, 0x40, 0x11, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0}; - - memcpy(fd_data->raw_packet, packet, I40E_UDPIP_DUMMY_PACKET_LEN); - - ip = (struct iphdr *)(fd_data->raw_packet + IP_HEADER_OFFSET); - udp = (struct udphdr *)(fd_data->raw_packet + IP_HEADER_OFFSET - + sizeof(struct iphdr)); - - ip->saddr = fsp->h_u.tcp_ip4_spec.ip4src; - ip->daddr = fsp->h_u.tcp_ip4_spec.ip4dst; - udp->source = fsp->h_u.tcp_ip4_spec.psrc; - udp->dest = fsp->h_u.tcp_ip4_spec.pdst; - - for (i = I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP; - i <= I40E_FILTER_PCTYPE_NONF_IPV4_UDP; i++) { - fd_data->pctype = i; - ret = i40e_program_fdir_filter(fd_data, pf, add); - - if (ret) { - dev_info(&pf->pdev->dev, - "Filter command send failed for PCTYPE %d (ret = %d)\n", - fd_data->pctype, ret); - err = true; - } else { - dev_info(&pf->pdev->dev, - "Filter OK for PCTYPE %d (ret = %d)\n", - fd_data->pctype, ret); - } - } - - return err ? 
-EOPNOTSUPP : 0; + if ((rule->dst_ip[0] != input->dst_ip[0]) || + (rule->src_ip[0] != input->src_ip[0]) || + (rule->dst_port != input->dst_port) || + (rule->src_port != input->src_port)) + return false; + return true; } -#define I40E_TCPIP_DUMMY_PACKET_LEN 54 /** - * i40e_add_del_fdir_tcpv4 - Add/Remove TCPv4 Flow Director filters for - * a specific flow spec - * @vsi: pointer to the targeted VSI - * @fd_data: the flow director data required from the FDir descriptor - * @ethtool_rx_flow_spec: the flow spec - * @add: true adds a filter, false removes it + * i40e_update_ethtool_fdir_entry - Updates the fdir filter entry + * @vsi: Pointer to the targeted VSI + * @input: The filter to update or NULL to indicate deletion + * @sw_idx: Software index to the filter + * @cmd: The command to get or set Rx flow classification rules * - * Returns 0 if the filters were successfully added or removed + * This function updates (or deletes) a Flow Director entry from + * the hlist of the corresponding PF + * + * Returns 0 on success **/ -static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi, - struct i40e_fdir_data *fd_data, - struct ethtool_rx_flow_spec *fsp, bool add) +static int i40e_update_ethtool_fdir_entry(struct i40e_vsi *vsi, + struct i40e_fdir_filter *input, + u16 sw_idx, + struct ethtool_rxnfc *cmd) { + struct i40e_fdir_filter *rule, *parent; struct i40e_pf *pf = vsi->back; - struct tcphdr *tcp; - struct iphdr *ip; - bool err = false; - int ret; - /* Dummy packet */ - char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0, - 0x45, 0, 0, 0x28, 0, 0, 0x40, 0, 0x40, 0x6, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0x80, 0x11, 0x0, 0x72, 0, 0, 0, 0}; - - memcpy(fd_data->raw_packet, packet, I40E_TCPIP_DUMMY_PACKET_LEN); - - ip = (struct iphdr *)(fd_data->raw_packet + IP_HEADER_OFFSET); - tcp = (struct tcphdr *)(fd_data->raw_packet + IP_HEADER_OFFSET - + sizeof(struct iphdr)); - - ip->daddr = fsp->h_u.tcp_ip4_spec.ip4dst; - tcp->dest = fsp->h_u.tcp_ip4_spec.pdst; - ip->saddr = fsp->h_u.tcp_ip4_spec.ip4src; - tcp->source = fsp->h_u.tcp_ip4_spec.psrc; - - if (add) { - if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) { - dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n"); - pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED; - } - } + struct hlist_node *node2; + int err = -EINVAL; - fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN; - ret = i40e_program_fdir_filter(fd_data, pf, add); + parent = NULL; + rule = NULL; - if (ret) { - dev_info(&pf->pdev->dev, - "Filter command send failed for PCTYPE %d (ret = %d)\n", - fd_data->pctype, ret); - err = true; - } else { - dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d (ret = %d)\n", - fd_data->pctype, ret); + hlist_for_each_entry_safe(rule, node2, + &pf->fdir_filter_list, fdir_node) { + /* hash found, or no matching entry */ + if (rule->fd_id >= sw_idx) + break; + parent = rule; } - fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP; - - ret = i40e_program_fdir_filter(fd_data, pf, add); - if (ret) { - dev_info(&pf->pdev->dev, - "Filter command send failed for PCTYPE %d (ret = %d)\n", - fd_data->pctype, ret); - err = true; - } else { - dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d (ret = %d)\n", - fd_data->pctype, ret); + /* if there is an old rule occupying our place remove it */ + if (rule && (rule->fd_id == sw_idx)) { + if (input && !i40e_match_fdir_input_set(rule, input)) + err = i40e_add_del_fdir(vsi, rule, false); + else if (!input) + err = i40e_add_del_fdir(vsi, rule, false); + 
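/* Both branches above issue the same hardware delete: together they
+		 * read as "remove the old filter if this is a deletion, or if
+		 * the new rule's input set differs", i.e. the pair is
+		 * equivalent to
+		 * if (!input || !i40e_match_fdir_input_set(rule, input)).
+		 * The stale node is then unlinked and freed either way.
+		 */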
hlist_del(&rule->fdir_node); + kfree(rule); + pf->fdir_pf_active_filters--; } - return err ? -EOPNOTSUPP : 0; -} + /* If no input this was a delete, err should be 0 if a rule was + * successfully found and removed from the list else -EINVAL + */ + if (!input) + return err; -/** - * i40e_add_del_fdir_sctpv4 - Add/Remove SCTPv4 Flow Director filters for - * a specific flow spec - * @vsi: pointer to the targeted VSI - * @fd_data: the flow director data required from the FDir descriptor - * @ethtool_rx_flow_spec: the flow spec - * @add: true adds a filter, false removes it - * - * Returns 0 if the filters were successfully added or removed - **/ -static int i40e_add_del_fdir_sctpv4(struct i40e_vsi *vsi, - struct i40e_fdir_data *fd_data, - struct ethtool_rx_flow_spec *fsp, bool add) -{ - return -EOPNOTSUPP; + /* initialize node and set software index */ + INIT_HLIST_NODE(&input->fdir_node); + + /* add filter to the list */ + if (parent) + hlist_add_after(&parent->fdir_node, &input->fdir_node); + else + hlist_add_head(&input->fdir_node, + &pf->fdir_filter_list); + + /* update counts */ + pf->fdir_pf_active_filters++; + + return 0; } -#define I40E_IP_DUMMY_PACKET_LEN 34 /** - * i40e_add_del_fdir_ipv4 - Add/Remove IPv4 Flow Director filters for - * a specific flow spec - * @vsi: pointer to the targeted VSI - * @fd_data: the flow director data required for the FDir descriptor - * @fsp: the ethtool flow spec - * @add: true adds a filter, false removes it + * i40e_del_fdir_entry - Deletes a Flow Director filter entry + * @vsi: Pointer to the targeted VSI + * @cmd: The command to get or set Rx flow classification rules * - * Returns 0 if the filters were successfully added or removed - **/ -static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi, - struct i40e_fdir_data *fd_data, - struct ethtool_rx_flow_spec *fsp, bool add) + * The function removes a Flow Director filter entry from the + * hlist of the corresponding PF + * + * Returns 0 on success + */ +static int i40e_del_fdir_entry(struct i40e_vsi *vsi, + struct ethtool_rxnfc *cmd) { + struct ethtool_rx_flow_spec *fsp = + (struct ethtool_rx_flow_spec *)&cmd->fs; struct i40e_pf *pf = vsi->back; - struct iphdr *ip; - bool err = false; - int ret; - int i; - char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0, - 0x45, 0, 0, 0x14, 0, 0, 0x40, 0, 0x40, 0x10, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; - - memcpy(fd_data->raw_packet, packet, I40E_IP_DUMMY_PACKET_LEN); - ip = (struct iphdr *)(fd_data->raw_packet + IP_HEADER_OFFSET); + int ret = 0; - ip->saddr = fsp->h_u.usr_ip4_spec.ip4src; - ip->daddr = fsp->h_u.usr_ip4_spec.ip4dst; - ip->protocol = fsp->h_u.usr_ip4_spec.proto; + ret = i40e_update_ethtool_fdir_entry(vsi, NULL, fsp->location, cmd); - for (i = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER; - i <= I40E_FILTER_PCTYPE_FRAG_IPV4; i++) { - fd_data->pctype = i; - ret = i40e_program_fdir_filter(fd_data, pf, add); - - if (ret) { - dev_info(&pf->pdev->dev, - "Filter command send failed for PCTYPE %d (ret = %d)\n", - fd_data->pctype, ret); - err = true; - } else { - dev_info(&pf->pdev->dev, - "Filter OK for PCTYPE %d (ret = %d)\n", - fd_data->pctype, ret); - } - } - - return err ? 
-EOPNOTSUPP : 0; + i40e_fdir_check_and_reenable(pf); + return ret; } /** - * i40e_add_del_fdir_ethtool - Add/Remove Flow Director filters for - * a specific flow spec based on their protocol + * i40e_add_fdir_ethtool - Add/Remove Flow Director filters * @vsi: pointer to the targeted VSI * @cmd: command to get or set RX flow classification rules - * @add: true adds a filter, false removes it * - * Returns 0 if the filters were successfully added or removed + * Add Flow Director filters for a specific flow spec based on their + * protocol. Returns 0 if the filters were successfully added. **/ -static int i40e_add_del_fdir_ethtool(struct i40e_vsi *vsi, - struct ethtool_rxnfc *cmd, bool add) +static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi, + struct ethtool_rxnfc *cmd) { - struct i40e_fdir_data fd_data; - int ret = -EINVAL; + struct ethtool_rx_flow_spec *fsp; + struct i40e_fdir_filter *input; struct i40e_pf *pf; - struct ethtool_rx_flow_spec *fsp = - (struct ethtool_rx_flow_spec *)&cmd->fs; + int ret = -EINVAL; if (!vsi) return -EINVAL; pf = vsi->back; - if ((fsp->ring_cookie != RX_CLS_FLOW_DISC) && - (fsp->ring_cookie >= vsi->num_queue_pairs)) - return -EINVAL; + if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED)) + return -EOPNOTSUPP; - /* Populate the Flow Director that we have at the moment - * and allocate the raw packet buffer for the calling functions - */ - fd_data.raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_LOOKUP, - GFP_KERNEL); + if (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED) + return -ENOSPC; - if (!fd_data.raw_packet) { - dev_info(&pf->pdev->dev, "Could not allocate memory\n"); - return -ENOMEM; + fsp = (struct ethtool_rx_flow_spec *)&cmd->fs; + + if (fsp->location >= (pf->hw.func_caps.fd_filters_best_effort + + pf->hw.func_caps.fd_filters_guaranteed)) { + return -EINVAL; } - fd_data.q_index = fsp->ring_cookie; - fd_data.flex_off = 0; - fd_data.pctype = 0; - fd_data.dest_vsi = vsi->id; - fd_data.dest_ctl = I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX; - fd_data.fd_status = I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID; - fd_data.cnt_index = 0; - fd_data.fd_id = 0; + if (fsp->ring_cookie >= vsi->num_queue_pairs) + return -EINVAL; - switch (fsp->flow_type & ~FLOW_EXT) { - case TCP_V4_FLOW: - ret = i40e_add_del_fdir_tcpv4(vsi, &fd_data, fsp, add); - break; - case UDP_V4_FLOW: - ret = i40e_add_del_fdir_udpv4(vsi, &fd_data, fsp, add); - break; - case SCTP_V4_FLOW: - ret = i40e_add_del_fdir_sctpv4(vsi, &fd_data, fsp, add); - break; - case IPV4_FLOW: - ret = i40e_add_del_fdir_ipv4(vsi, &fd_data, fsp, add); - break; - case IP_USER_FLOW: - switch (fsp->h_u.usr_ip4_spec.proto) { - case IPPROTO_TCP: - ret = i40e_add_del_fdir_tcpv4(vsi, &fd_data, fsp, add); - break; - case IPPROTO_UDP: - ret = i40e_add_del_fdir_udpv4(vsi, &fd_data, fsp, add); - break; - case IPPROTO_SCTP: - ret = i40e_add_del_fdir_sctpv4(vsi, &fd_data, fsp, add); - break; - default: - ret = i40e_add_del_fdir_ipv4(vsi, &fd_data, fsp, add); - break; - } - break; - default: - dev_info(&pf->pdev->dev, "Could not specify spec type\n"); - ret = -EINVAL; - } + input = kzalloc(sizeof(*input), GFP_KERNEL); + + if (!input) + return -ENOMEM; - kfree(fd_data.raw_packet); - fd_data.raw_packet = NULL; + input->fd_id = fsp->location; + + if (fsp->ring_cookie == RX_CLS_FLOW_DISC) + input->dest_ctl = I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET; + else + input->dest_ctl = + I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX; + + input->q_index = fsp->ring_cookie; + input->flex_off = 0; + input->pctype = 0; + input->dest_vsi = vsi->id; + 
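/* The ethtool flow spec maps onto the driver's filter struct here:
+	 * fsp->location doubles as the software fd_id used for later lookup
+	 * and delete, and RX_CLS_FLOW_DISC in ring_cookie selects the drop
+	 * action while any other value steers matched packets to that
+	 * queue index.
+	 */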
input->fd_status = I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID; + input->cnt_index = 0; + input->flow_type = fsp->flow_type; + input->ip4_proto = fsp->h_u.usr_ip4_spec.proto; + input->src_port = fsp->h_u.tcp_ip4_spec.psrc; + input->dst_port = fsp->h_u.tcp_ip4_spec.pdst; + input->src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src; + input->dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst; + + ret = i40e_add_del_fdir(vsi, input, true); + if (ret) + kfree(input); + else + i40e_update_ethtool_fdir_entry(vsi, input, fsp->location, NULL); return ret; } @@ -1580,10 +1560,10 @@ static int i40e_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd) ret = i40e_set_rss_hash_opt(pf, cmd); break; case ETHTOOL_SRXCLSRLINS: - ret = i40e_add_del_fdir_ethtool(vsi, cmd, true); + ret = i40e_add_fdir_ethtool(vsi, cmd); break; case ETHTOOL_SRXCLSRLDEL: - ret = i40e_add_del_fdir_ethtool(vsi, cmd, false); + ret = i40e_del_fdir_entry(vsi, cmd); break; default: break; diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index b901371ca361..861b722c2672 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -26,6 +26,7 @@ /* Local includes */ #include "i40e.h" +#include "i40e_diag.h" #ifdef CONFIG_I40E_VXLAN #include <net/vxlan.h> #endif @@ -38,7 +39,7 @@ static const char i40e_driver_string[] = #define DRV_VERSION_MAJOR 0 #define DRV_VERSION_MINOR 3 -#define DRV_VERSION_BUILD 30 +#define DRV_VERSION_BUILD 36 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ __stringify(DRV_VERSION_MINOR) "." \ __stringify(DRV_VERSION_BUILD) DRV_KERN @@ -305,6 +306,7 @@ static void i40e_tx_timeout(struct net_device *netdev) break; default: netdev_err(netdev, "tx_timeout recovery unsuccessful\n"); + set_bit(__I40E_DOWN, &vsi->state); i40e_down(vsi); break; } @@ -375,20 +377,20 @@ static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct( continue; do { - start = u64_stats_fetch_begin_bh(&tx_ring->syncp); + start = u64_stats_fetch_begin_irq(&tx_ring->syncp); packets = tx_ring->stats.packets; bytes = tx_ring->stats.bytes; - } while (u64_stats_fetch_retry_bh(&tx_ring->syncp, start)); + } while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start)); stats->tx_packets += packets; stats->tx_bytes += bytes; rx_ring = &tx_ring[1]; do { - start = u64_stats_fetch_begin_bh(&rx_ring->syncp); + start = u64_stats_fetch_begin_irq(&rx_ring->syncp); packets = rx_ring->stats.packets; bytes = rx_ring->stats.bytes; - } while (u64_stats_fetch_retry_bh(&rx_ring->syncp, start)); + } while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start)); stats->rx_packets += packets; stats->rx_bytes += bytes; @@ -739,6 +741,7 @@ void i40e_update_stats(struct i40e_vsi *vsi) u32 rx_page, rx_buf; u64 rx_p, rx_b; u64 tx_p, tx_b; + u32 val; int i; u16 q; @@ -769,10 +772,10 @@ void i40e_update_stats(struct i40e_vsi *vsi) p = ACCESS_ONCE(vsi->tx_rings[q]); do { - start = u64_stats_fetch_begin_bh(&p->syncp); + start = u64_stats_fetch_begin_irq(&p->syncp); packets = p->stats.packets; bytes = p->stats.bytes; - } while (u64_stats_fetch_retry_bh(&p->syncp, start)); + } while (u64_stats_fetch_retry_irq(&p->syncp, start)); tx_b += bytes; tx_p += packets; tx_restart += p->tx_stats.restart_queue; @@ -781,10 +784,10 @@ void i40e_update_stats(struct i40e_vsi *vsi) /* Rx queue is part of the same block as Tx queue */ p = &p[1]; do { - start = u64_stats_fetch_begin_bh(&p->syncp); + start = u64_stats_fetch_begin_irq(&p->syncp); packets = p->stats.packets; bytes = p->stats.bytes; - } while 
(u64_stats_fetch_retry_bh(&p->syncp, start)); + } while (u64_stats_fetch_retry_irq(&p->syncp, start)); rx_b += bytes; rx_p += packets; rx_buf += p->rx_stats.alloc_buff_failed; @@ -971,6 +974,20 @@ void i40e_update_stats(struct i40e_vsi *vsi) i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port), pf->stat_offsets_loaded, &osd->rx_jabber, &nsd->rx_jabber); + + val = rd32(hw, I40E_PRTPM_EEE_STAT); + nsd->tx_lpi_status = + (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >> + I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT; + nsd->rx_lpi_status = + (val & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK) >> + I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT; + i40e_stat_update32(hw, I40E_PRTPM_TLPIC, + pf->stat_offsets_loaded, + &osd->tx_lpi_count, &nsd->tx_lpi_count); + i40e_stat_update32(hw, I40E_PRTPM_RLPIC, + pf->stat_offsets_loaded, + &osd->rx_lpi_count, &nsd->rx_lpi_count); } pf->stat_offsets_loaded = true; @@ -1964,11 +1981,14 @@ static int i40e_vlan_rx_add_vid(struct net_device *netdev, netdev_info(netdev, "adding %pM vid=%d\n", netdev->dev_addr, vid); - /* If the network stack called us with vid = 0, we should - * indicate to i40e_vsi_add_vlan() that we want to receive - * any traffic (i.e. with any vlan tag, or untagged) + /* If the network stack called us with vid = 0 then + * it is asking to receive priority tagged packets with + * vlan id 0. Our HW receives them by default when configured + * to receive untagged packets so there is no need to add an + * extra filter for vlan 0 tagged packets. */ - ret = i40e_vsi_add_vlan(vsi, vid ? vid : I40E_VLAN_ANY); + if (vid) + ret = i40e_vsi_add_vlan(vsi, vid); if (!ret && (vid < VLAN_N_VID)) set_bit(vid, vsi->active_vlans); @@ -1981,7 +2001,7 @@ static int i40e_vlan_rx_add_vid(struct net_device *netdev, * @netdev: network interface to be adjusted * @vid: vlan id to be removed * - * net_device_ops implementation for adding vlan ids + * net_device_ops implementation for removing vlan ids **/ static int i40e_vlan_rx_kill_vid(struct net_device *netdev, __always_unused __be16 proto, u16 vid) @@ -2177,6 +2197,11 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring) tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED)); tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP); + /* FDIR VSI tx ring can still use RS bit and writebacks */ + if (vsi->type != I40E_VSI_FDIR) + tx_ctx.head_wb_ena = 1; + tx_ctx.head_wb_addr = ring->dma + + (ring->count * sizeof(struct i40e_tx_desc)); /* As part of VSI creation/update, FW allocates certain * Tx arbitration queue sets for each TC enabled for @@ -2420,6 +2445,28 @@ static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi) } /** + * i40e_fdir_filter_restore - Restore the Sideband Flow Director filters + * @vsi: Pointer to the targeted VSI + * + * This function replays the hlist on the hw where all the SB Flow Director + * filters were saved. 
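+ * A reset clears the filters programmed in hardware while this list
+ * still holds the rules the user configured, so replaying it (from
+ * i40e_up_complete() on the FDIR VSI) brings hardware back in sync
+ * with the driver's software state.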
+ **/ +static void i40e_fdir_filter_restore(struct i40e_vsi *vsi) +{ + struct i40e_fdir_filter *filter; + struct i40e_pf *pf = vsi->back; + struct hlist_node *node; + + if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED)) + return; + + hlist_for_each_entry_safe(filter, node, + &pf->fdir_filter_list, fdir_node) { + i40e_add_del_fdir(vsi, filter, true); + } +} + +/** * i40e_vsi_configure - Set up the VSI for action * @vsi: the VSI being configured **/ @@ -2557,7 +2604,7 @@ static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi) /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */ wr32(hw, I40E_PFINT_LNKLST0, 0); - /* Associate the queue pair to the vector and enable the q int */ + /* Associate the queue pair to the vector and enable the queue int */ val = I40E_QINT_RQCTL_CAUSE_ENA_MASK | (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) | (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT); @@ -2831,12 +2878,14 @@ static irqreturn_t i40e_intr(int irq, void *data) val = rd32(hw, I40E_GLGEN_RSTAT); val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK) >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT; - if (val == I40E_RESET_CORER) + if (val == I40E_RESET_CORER) { pf->corer_count++; - else if (val == I40E_RESET_GLOBR) + } else if (val == I40E_RESET_GLOBR) { pf->globr_count++; - else if (val == I40E_RESET_EMPR) + } else if (val == I40E_RESET_EMPR) { pf->empr_count++; + set_bit(__I40E_EMP_RESET_REQUESTED, &pf->state); + } } if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) { @@ -2866,8 +2915,7 @@ static irqreturn_t i40e_intr(int irq, void *data) icr0_remaining); if ((icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) || (icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) || - (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK) || - (icr0_remaining & I40E_PFINT_ICR0_MAL_DETECT_MASK)) { + (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK)) { dev_info(&pf->pdev->dev, "device will be reset\n"); set_bit(__I40E_PF_RESET_REQUESTED, &pf->state); i40e_service_event_schedule(pf); @@ -3107,13 +3155,13 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable) pf_q = vsi->base_queue; for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) { - j = 1000; - do { - usleep_range(1000, 2000); + for (j = 0; j < 50; j++) { tx_reg = rd32(hw, I40E_QTX_ENA(pf_q)); - } while (j-- && ((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) - ^ (tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT)) & 1); - + if (((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 1) == + ((tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT) & 1)) + break; + usleep_range(1000, 2000); + } /* Skip if the queue is already in the requested state */ if (enable && (tx_reg & I40E_QTX_ENA_QENA_STAT_MASK)) continue; @@ -3123,8 +3171,7 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable) /* turn on/off the queue */ if (enable) { wr32(hw, I40E_QTX_HEAD(pf_q), 0); - tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK | - I40E_QTX_ENA_QENA_STAT_MASK; + tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK; } else { tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK; } @@ -3171,12 +3218,13 @@ static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable) pf_q = vsi->base_queue; for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) { - j = 1000; - do { - usleep_range(1000, 2000); + for (j = 0; j < 50; j++) { rx_reg = rd32(hw, I40E_QRX_ENA(pf_q)); - } while (j-- && ((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) - ^ (rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT)) & 1); + if (((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 1) == + ((rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 1)) + break; + usleep_range(1000, 2000); + } if (enable) { /* is STAT set ? 
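 * (QENA_REQ is the driver's request bit and QENA_STAT the hardware's
	 * acknowledgement; the reworked wait loops above poll until the two
	 * bits agree before REQ is toggled, instead of writing STAT directly.)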
*/ @@ -3190,11 +3238,9 @@ static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable) /* turn on/off the queue */ if (enable) - rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK | - I40E_QRX_ENA_QENA_STAT_MASK; + rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK; else - rx_reg &= ~(I40E_QRX_ENA_QENA_REQ_MASK | - I40E_QRX_ENA_QENA_STAT_MASK); + rx_reg &= ~I40E_QRX_ENA_QENA_REQ_MASK; wr32(hw, I40E_QRX_ENA(pf_q), rx_reg); /* wait for the change to finish */ @@ -3732,8 +3778,8 @@ static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc, NULL); if (aq_ret) { dev_info(&vsi->back->pdev->dev, - "%s: AQ command Config VSI BW allocation per TC failed = %d\n", - __func__, vsi->back->hw.aq.asq_last_status); + "AQ command Config VSI BW allocation per TC failed = %d\n", + vsi->back->hw.aq.asq_last_status); return -EINVAL; } @@ -4062,6 +4108,10 @@ static int i40e_up_complete(struct i40e_vsi *vsi) } else if (vsi->netdev) { netdev_info(vsi->netdev, "NIC Link is Down\n"); } + + /* replay FDIR SB filters */ + if (vsi->type == I40E_VSI_FDIR) + i40e_fdir_filter_restore(vsi); i40e_service_event_schedule(pf); return 0; @@ -4208,15 +4258,40 @@ static int i40e_open(struct net_device *netdev) struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; - char int_name[IFNAMSIZ]; int err; - /* disallow open during test */ - if (test_bit(__I40E_TESTING, &pf->state)) + /* disallow open during test or if eeprom is broken */ + if (test_bit(__I40E_TESTING, &pf->state) || + test_bit(__I40E_BAD_EEPROM, &pf->state)) return -EBUSY; netif_carrier_off(netdev); + err = i40e_vsi_open(vsi); + if (err) + return err; + +#ifdef CONFIG_I40E_VXLAN + vxlan_get_rx_port(netdev); +#endif + + return 0; +} + +/** + * i40e_vsi_open - finish initialization and bring up a VSI + * @vsi: the VSI to open + * + * Finish initialization of the VSI. + * + * Returns 0 on success, negative value on failure + **/ +int i40e_vsi_open(struct i40e_vsi *vsi) +{ + struct i40e_pf *pf = vsi->back; + char int_name[IFNAMSIZ]; + int err; + /* allocate descriptors */ err = i40e_vsi_setup_tx_resources(vsi); if (err) @@ -4229,18 +4304,22 @@ static int i40e_open(struct net_device *netdev) if (err) goto err_setup_rx; + if (!vsi->netdev) { + err = -EINVAL; + goto err_setup_rx; + } snprintf(int_name, sizeof(int_name) - 1, "%s-%s", - dev_driver_string(&pf->pdev->dev), netdev->name); + dev_driver_string(&pf->pdev->dev), vsi->netdev->name); err = i40e_vsi_request_irq(vsi, int_name); if (err) goto err_setup_rx; /* Notify the stack of the actual queue counts. */ - err = netif_set_real_num_tx_queues(netdev, vsi->num_queue_pairs); + err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_queue_pairs); if (err) goto err_set_queues; - err = netif_set_real_num_rx_queues(netdev, vsi->num_queue_pairs); + err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_queue_pairs); if (err) goto err_set_queues; @@ -4248,10 +4327,6 @@ static int i40e_open(struct net_device *netdev) if (err) goto err_up_complete; -#ifdef CONFIG_I40E_VXLAN - vxlan_get_rx_port(netdev); -#endif - return 0; err_up_complete: @@ -4269,6 +4344,26 @@ err_setup_tx: } /** + * i40e_fdir_filter_exit - Cleans up the Flow Director accounting + * @pf: Pointer to pf + * + * This function destroys the hlist where all the Flow Director + * filters were saved. 
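+ * It is called from i40e_fdir_teardown() and when the ntuple feature
+ * is switched off, so that stale software rules are not replayed
+ * into hardware later.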
+ **/ +static void i40e_fdir_filter_exit(struct i40e_pf *pf) +{ + struct i40e_fdir_filter *filter; + struct hlist_node *node2; + + hlist_for_each_entry_safe(filter, node2, + &pf->fdir_filter_list, fdir_node) { + hlist_del(&filter->fdir_node); + kfree(filter); + } + pf->fdir_pf_active_filters = 0; +} + +/** * i40e_close - Disables a network interface * @netdev: network interface device structure * @@ -4321,7 +4416,7 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags) * for the warning interrupt will deal with the shutdown * and recovery of the switch setup. */ - dev_info(&pf->pdev->dev, "GlobalR requested\n"); + dev_dbg(&pf->pdev->dev, "GlobalR requested\n"); val = rd32(&pf->hw, I40E_GLGEN_RTRIG); val |= I40E_GLGEN_RTRIG_GLOBR_MASK; wr32(&pf->hw, I40E_GLGEN_RTRIG, val); @@ -4332,7 +4427,7 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags) * * Same as Global Reset, except does *not* include the MAC/PHY */ - dev_info(&pf->pdev->dev, "CoreR requested\n"); + dev_dbg(&pf->pdev->dev, "CoreR requested\n"); val = rd32(&pf->hw, I40E_GLGEN_RTRIG); val |= I40E_GLGEN_RTRIG_CORER_MASK; wr32(&pf->hw, I40E_GLGEN_RTRIG, val); @@ -4366,7 +4461,7 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags) * the switch, since we need to do all the recovery as * for the Core Reset. */ - dev_info(&pf->pdev->dev, "PFR requested\n"); + dev_dbg(&pf->pdev->dev, "PFR requested\n"); i40e_handle_reset_warning(pf); } else if (reset_flags & (1 << __I40E_REINIT_REQUESTED)) { @@ -4415,18 +4510,18 @@ bool i40e_dcb_need_reconfig(struct i40e_pf *pf, &old_cfg->etscfg.prioritytable, sizeof(new_cfg->etscfg.prioritytable))) { need_reconfig = true; - dev_info(&pf->pdev->dev, "ETS UP2TC changed.\n"); + dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n"); } if (memcmp(&new_cfg->etscfg.tcbwtable, &old_cfg->etscfg.tcbwtable, sizeof(new_cfg->etscfg.tcbwtable))) - dev_info(&pf->pdev->dev, "ETS TC BW Table changed.\n"); + dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n"); if (memcmp(&new_cfg->etscfg.tsatable, &old_cfg->etscfg.tsatable, sizeof(new_cfg->etscfg.tsatable))) - dev_info(&pf->pdev->dev, "ETS TSA Table changed.\n"); + dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n"); } /* Check if PFC configuration has changed */ @@ -4434,7 +4529,7 @@ bool i40e_dcb_need_reconfig(struct i40e_pf *pf, &old_cfg->pfc, sizeof(new_cfg->pfc))) { need_reconfig = true; - dev_info(&pf->pdev->dev, "PFC config change detected.\n"); + dev_dbg(&pf->pdev->dev, "PFC config change detected.\n"); } /* Check if APP Table has changed */ @@ -4442,7 +4537,7 @@ bool i40e_dcb_need_reconfig(struct i40e_pf *pf, &old_cfg->app, sizeof(new_cfg->app))) { need_reconfig = true; - dev_info(&pf->pdev->dev, "APP Table change detected.\n"); + dev_dbg(&pf->pdev->dev, "APP Table change detected.\n"); } return need_reconfig; @@ -4492,7 +4587,7 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf, /* No change detected in DCBX configs */ if (!memcmp(&tmp_dcbx_cfg, dcbx_cfg, sizeof(tmp_dcbx_cfg))) { - dev_info(&pf->pdev->dev, "No change detected in DCBX configuration.\n"); + dev_dbg(&pf->pdev->dev, "No change detected in DCBX configuration.\n"); goto exit; } @@ -4550,8 +4645,8 @@ static void i40e_handle_lan_overflow_event(struct i40e_pf *pf, struct i40e_vf *vf; u16 vf_id; - dev_info(&pf->pdev->dev, "%s: Rx Queue Number = %d QTX_CTL=0x%08x\n", - __func__, queue, qtx_ctl); + dev_dbg(&pf->pdev->dev, "overflow Rx Queue Number = %d QTX_CTL=0x%08x\n", + queue, qtx_ctl); /* Queue belongs to VF, find the VF and issue VF reset */ if (((qtx_ctl & I40E_QTX_CTL_PFVF_Q_MASK) @@ 
-4581,6 +4676,54 @@ static void i40e_service_event_complete(struct i40e_pf *pf) } /** + * i40e_get_current_fd_count - Get the count of FD filters programmed in the HW + * @pf: board private structure + **/ +int i40e_get_current_fd_count(struct i40e_pf *pf) +{ + int val, fcnt_prog; + val = rd32(&pf->hw, I40E_PFQF_FDSTAT); + fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) + + ((val & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >> + I40E_PFQF_FDSTAT_BEST_CNT_SHIFT); + return fcnt_prog; +} + +/** + * i40e_fdir_check_and_reenable - Function to re-enable FD ATR or SB if disabled + * @pf: board private structure + **/ +void i40e_fdir_check_and_reenable(struct i40e_pf *pf) +{ + u32 fcnt_prog, fcnt_avail; + + /* Check if FD SB or ATR was auto-disabled and if there is enough room + * to re-enable + */ + if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) && + (pf->flags & I40E_FLAG_FD_SB_ENABLED)) + return; + fcnt_prog = i40e_get_current_fd_count(pf); + fcnt_avail = pf->hw.fdir_shared_filter_count + + pf->fdir_pf_filter_count; + if (fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) { + if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) && + (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) { + pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED; + dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n"); + } + } + /* Wait for some more space to be available to turn on ATR */ + if (fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM * 2)) { + if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) && + (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) { + pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED; + dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table now\n"); + } + } +} + +/** * i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table * @pf: board private structure **/ @@ -4589,11 +4732,14 @@ static void i40e_fdir_reinit_subtask(struct i40e_pf *pf) if (!(pf->flags & I40E_FLAG_FDIR_REQUIRES_REINIT)) return; - pf->flags &= ~I40E_FLAG_FDIR_REQUIRES_REINIT; - /* if interface is down do nothing */ if (test_bit(__I40E_DOWN, &pf->state)) return; + i40e_fdir_check_and_reenable(pf); + + if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) && + (pf->flags & I40E_FLAG_FD_SB_ENABLED)) + pf->flags &= ~I40E_FLAG_FDIR_REQUIRES_REINIT; } /** @@ -4903,7 +5049,7 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf) event.msg_size); break; case i40e_aqc_opc_lldp_update_mib: - dev_info(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n"); + dev_dbg(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n"); #ifdef CONFIG_I40E_DCB rtnl_lock(); ret = i40e_handle_lldp_event(pf, &event); @@ -4911,7 +5057,7 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf) #endif /* CONFIG_I40E_DCB */ break; case i40e_aqc_opc_event_lan_overflow: - dev_info(&pf->pdev->dev, "ARQ LAN queue overflow event received\n"); + dev_dbg(&pf->pdev->dev, "ARQ LAN queue overflow event received\n"); i40e_handle_lan_overflow_event(pf, &event); break; case i40e_aqc_opc_send_msg_to_peer: @@ -4936,6 +5082,31 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf) } /** + * i40e_verify_eeprom - make sure eeprom is good to use + * @pf: board private structure + **/ +static void i40e_verify_eeprom(struct i40e_pf *pf) +{ + int err; + + err = i40e_diag_eeprom_test(&pf->hw); + if (err) { + /* retry in case of garbage read */ + err = i40e_diag_eeprom_test(&pf->hw); + if (err) { + dev_info(&pf->pdev->dev, "eeprom check failed (%d), Tx/Rx traffic disabled\n", + err); + 
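/* __I40E_BAD_EEPROM, set just below, is checked in i40e_open(),
+			 * so a failed checksum keeps the netdev from being
+			 * brought up until the EMP-reset path re-runs
+			 * i40e_verify_eeprom() and clears the bit.
+			 */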
set_bit(__I40E_BAD_EEPROM, &pf->state); + } + } + + if (!err && test_bit(__I40E_BAD_EEPROM, &pf->state)) { + dev_info(&pf->pdev->dev, "eeprom check passed, Tx/Rx traffic enabled\n"); + clear_bit(__I40E_BAD_EEPROM, &pf->state); + } +} + +/** * i40e_reconstitute_veb - rebuild the VEB and anything connected to it * @veb: pointer to the VEB instance * @@ -5053,6 +5224,12 @@ static int i40e_get_capabilities(struct i40e_pf *pf) /* increment MSI-X count because current FW skips one */ pf->hw.func_caps.num_msix_vectors++; + if (((pf->hw.aq.fw_maj_ver == 2) && (pf->hw.aq.fw_min_ver < 22)) || + (pf->hw.aq.fw_maj_ver < 2)) { + pf->hw.func_caps.num_msix_vectors++; + pf->hw.func_caps.num_msix_vectors_vf++; + } + if (pf->hw.debug_mask & I40E_DEBUG_USER) dev_info(&pf->pdev->dev, "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n", @@ -5132,9 +5309,9 @@ static void i40e_fdir_sb_setup(struct i40e_pf *pf) err = i40e_up_complete(vsi); if (err) goto err_up_complete; + clear_bit(__I40E_NEEDS_RESTART, &vsi->state); } - clear_bit(__I40E_NEEDS_RESTART, &vsi->state); return; err_up_complete: @@ -5157,6 +5334,7 @@ static void i40e_fdir_teardown(struct i40e_pf *pf) { int i; + i40e_fdir_filter_exit(pf); for (i = 0; i < pf->hw.func_caps.num_vsis; i++) { if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) { i40e_vsi_release(pf->vsi[i]); @@ -5181,7 +5359,7 @@ static int i40e_prep_for_reset(struct i40e_pf *pf) if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) return 0; - dev_info(&pf->pdev->dev, "Tearing down internal switch for reset\n"); + dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n"); if (i40e_check_asq_alive(hw)) i40e_vc_notify_reset(pf); @@ -5228,7 +5406,7 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit) if (test_bit(__I40E_DOWN, &pf->state)) goto end_core_reset; - dev_info(&pf->pdev->dev, "Rebuilding internal switch\n"); + dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n"); /* rebuild the basics for the AdminQ, HMC, and initial HW switch */ ret = i40e_init_adminq(&pf->hw); @@ -5237,6 +5415,12 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit) goto end_core_reset; } + /* re-verify the eeprom if we just had an EMP reset */ + if (test_bit(__I40E_EMP_RESET_REQUESTED, &pf->state)) { + clear_bit(__I40E_EMP_RESET_REQUESTED, &pf->state); + i40e_verify_eeprom(pf); + } + ret = i40e_get_capabilities(pf); if (ret) { dev_info(&pf->pdev->dev, "i40e_get_capabilities failed, %d\n", @@ -5278,7 +5462,7 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit) * try to recover minimal use by getting the basic PF VSI working. 
*/ if (pf->vsi[pf->lan_vsi]->uplink_seid != pf->mac_seid) { - dev_info(&pf->pdev->dev, "attempting to rebuild switch\n"); + dev_dbg(&pf->pdev->dev, "attempting to rebuild switch\n"); /* find the one VEB connected to the MAC, and find orphans */ for (v = 0; v < I40E_MAX_VEB; v++) { if (!pf->veb[v]) @@ -5331,6 +5515,11 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit) /* restart the VSIs that were rebuilt and running before the reset */ i40e_pf_unquiesce_all_vsi(pf); + if (pf->num_alloc_vfs) { + for (v = 0; v < pf->num_alloc_vfs; v++) + i40e_reset_vf(&pf->vf[v], true); + } + /* tell the firmware that we're starting */ dv.major_version = DRV_VERSION_MAJOR; dv.minor_version = DRV_VERSION_MINOR; @@ -5338,7 +5527,7 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit) dv.subbuild_version = 0; i40e_aq_send_driver_version(&pf->hw, &dv, NULL); - dev_info(&pf->pdev->dev, "PF reset done\n"); + dev_info(&pf->pdev->dev, "reset complete\n"); end_core_reset: clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state); @@ -5387,7 +5576,7 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf) u8 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >> I40E_GL_MDET_TX_QUEUE_SHIFT; dev_info(&pf->pdev->dev, - "Malicious Driver Detection TX event 0x%02x on q %d of function 0x%02x\n", + "Malicious Driver Detection event 0x%02x on TX queue %d of function 0x%02x\n", event, queue, func); wr32(hw, I40E_GL_MDET_TX, 0xffffffff); mdd_detected = true; @@ -5401,7 +5590,7 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf) u8 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >> I40E_GL_MDET_RX_QUEUE_SHIFT; dev_info(&pf->pdev->dev, - "Malicious Driver Detection RX event 0x%02x on q %d of function 0x%02x\n", + "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n", event, queue, func); wr32(hw, I40E_GL_MDET_RX, 0xffffffff); mdd_detected = true; @@ -5850,37 +6039,16 @@ err_out: **/ static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors) { - int err = 0; - - pf->num_msix_entries = 0; - while (vectors >= I40E_MIN_MSIX) { - err = pci_enable_msix(pf->pdev, pf->msix_entries, vectors); - if (err == 0) { - /* good to go */ - pf->num_msix_entries = vectors; - break; - } else if (err < 0) { - /* total failure */ - dev_info(&pf->pdev->dev, - "MSI-X vector reservation failed: %d\n", err); - vectors = 0; - break; - } else { - /* err > 0 is the hint for retry */ - dev_info(&pf->pdev->dev, - "MSI-X vectors wanted %d, retrying with %d\n", - vectors, err); - vectors = err; - } - } - - if (vectors > 0 && vectors < I40E_MIN_MSIX) { + vectors = pci_enable_msix_range(pf->pdev, pf->msix_entries, + I40E_MIN_MSIX, vectors); + if (vectors < 0) { dev_info(&pf->pdev->dev, - "Couldn't get enough vectors, only %d available\n", - vectors); + "MSI-X vector reservation failed: %d\n", vectors); vectors = 0; } + pf->num_msix_entries = vectors; + return vectors; } @@ -5942,7 +6110,7 @@ static int i40e_init_msix(struct i40e_pf *pf) } else if (vec == I40E_MIN_MSIX) { /* Adjust for minimal MSIX use */ - dev_info(&pf->pdev->dev, "Features disabled, not enough MSIX vectors\n"); + dev_info(&pf->pdev->dev, "Features disabled, not enough MSI-X vectors\n"); pf->flags &= ~I40E_FLAG_VMDQ_ENABLED; pf->num_vmdq_vsis = 0; pf->num_vmdq_qps = 0; @@ -5978,13 +6146,13 @@ static int i40e_init_msix(struct i40e_pf *pf) } /** - * i40e_alloc_q_vector - Allocate memory for a single interrupt vector + * i40e_vsi_alloc_q_vector - Allocate memory for a single interrupt vector * @vsi: the VSI being configured * @v_idx: index of 
the vector in the vsi struct * * We allocate one q_vector. If allocation fails we return -ENOMEM. **/ -static int i40e_alloc_q_vector(struct i40e_vsi *vsi, int v_idx) +static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx) { struct i40e_q_vector *q_vector; @@ -6010,13 +6178,13 @@ static int i40e_alloc_q_vector(struct i40e_vsi *vsi, int v_idx) } /** - * i40e_alloc_q_vectors - Allocate memory for interrupt vectors + * i40e_vsi_alloc_q_vectors - Allocate memory for interrupt vectors * @vsi: the VSI being configured * * We allocate one q_vector per queue interrupt. If allocation fails we * return -ENOMEM. **/ -static int i40e_alloc_q_vectors(struct i40e_vsi *vsi) +static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi) { struct i40e_pf *pf = vsi->back; int v_idx, num_q_vectors; @@ -6031,7 +6199,7 @@ static int i40e_alloc_q_vectors(struct i40e_vsi *vsi) return -EINVAL; for (v_idx = 0; v_idx < num_q_vectors; v_idx++) { - err = i40e_alloc_q_vector(vsi, v_idx); + err = i40e_vsi_alloc_q_vector(vsi, v_idx); if (err) goto err_out; } @@ -6071,7 +6239,7 @@ static void i40e_init_interrupt_scheme(struct i40e_pf *pf) if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) && (pf->flags & I40E_FLAG_MSI_ENABLED)) { - dev_info(&pf->pdev->dev, "MSIX not available, trying MSI\n"); + dev_info(&pf->pdev->dev, "MSI-X not available, trying MSI\n"); err = pci_enable_msi(pf->pdev); if (err) { dev_info(&pf->pdev->dev, "MSI init failed - %d\n", err); @@ -6080,7 +6248,7 @@ static void i40e_init_interrupt_scheme(struct i40e_pf *pf) } if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED))) - dev_info(&pf->pdev->dev, "MSIX and MSI not available, falling back to Legacy IRQ\n"); + dev_info(&pf->pdev->dev, "MSI-X and MSI not available, falling back to Legacy IRQ\n"); /* track first vector for misc interrupts */ err = i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT-1); @@ -6107,7 +6275,8 @@ static int i40e_setup_misc_vector(struct i40e_pf *pf) i40e_intr, 0, pf->misc_int_name, pf); if (err) { dev_info(&pf->pdev->dev, - "request_irq for msix_misc failed: %d\n", err); + "request_irq for %s failed: %d\n", + pf->misc_int_name, err); return -EFAULT; } } @@ -6258,15 +6427,11 @@ static int i40e_sw_init(struct i40e_pf *pf) (pf->hw.func_caps.fd_filters_best_effort > 0)) { pf->flags |= I40E_FLAG_FD_ATR_ENABLED; pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE; - dev_info(&pf->pdev->dev, - "Flow Director ATR mode Enabled\n"); if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) { pf->flags |= I40E_FLAG_FD_SB_ENABLED; - dev_info(&pf->pdev->dev, - "Flow Director Side Band mode Enabled\n"); } else { dev_info(&pf->pdev->dev, - "Flow Director Side Band mode Disabled in MFP mode\n"); + "Flow Director Sideband mode Disabled in MFP mode\n"); } pf->fdir_pf_filter_count = pf->hw.func_caps.fd_filters_guaranteed; @@ -6287,9 +6452,6 @@ static int i40e_sw_init(struct i40e_pf *pf) pf->num_req_vfs = min_t(int, pf->hw.func_caps.num_vfs, I40E_MAX_VF_COUNT); - dev_info(&pf->pdev->dev, - "Number of VFs being requested for PF[%d] = %d\n", - pf->hw.pf_id, pf->num_req_vfs); } #endif /* CONFIG_PCI_IOV */ pf->eeprom_version = 0xDEAD; @@ -6326,6 +6488,39 @@ sw_init_done: } /** + * i40e_set_ntuple - set the ntuple feature flag and take action + * @pf: board private structure to initialize + * @features: the feature set that the stack is suggesting + * + * returns a bool to indicate if reset needs to happen + **/ +bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features) +{ + bool need_reset = false; + + /* Check if Flow Director n-tuple 
support was enabled or disabled. If + * the state changed, we need to reset. + */ + if (features & NETIF_F_NTUPLE) { + /* Enable filters and mark for reset */ + if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED)) + need_reset = true; + pf->flags |= I40E_FLAG_FD_SB_ENABLED; + } else { + /* turn off filters, mark for reset and clear SW filter list */ + if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { + need_reset = true; + i40e_fdir_filter_exit(pf); + } + pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; + /* if ATR was disabled it can be re-enabled. */ + if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED)) + pf->flags |= I40E_FLAG_FD_ATR_ENABLED; + } + return need_reset; +} + +/** * i40e_set_features - set the netdev feature flags * @netdev: ptr to the netdev being adjusted * @features: the feature set that the stack is suggesting @@ -6335,12 +6530,19 @@ static int i40e_set_features(struct net_device *netdev, { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; + struct i40e_pf *pf = vsi->back; + bool need_reset; if (features & NETIF_F_HW_VLAN_CTAG_RX) i40e_vlan_stripping_enable(vsi); else i40e_vlan_stripping_disable(vsi); + need_reset = i40e_set_ntuple(pf, features); + + if (need_reset) + i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED)); + return 0; } @@ -6464,6 +6666,7 @@ static const struct net_device_ops i40e_netdev_ops = { .ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan, .ndo_set_vf_tx_rate = i40e_ndo_set_vf_bw, .ndo_get_vf_config = i40e_ndo_get_vf_config, + .ndo_set_vf_link_state = i40e_ndo_set_vf_link_state, #ifdef CONFIG_I40E_VXLAN .ndo_add_vxlan_port = i40e_add_vxlan_port, .ndo_del_vxlan_port = i40e_del_vxlan_port, @@ -6495,10 +6698,9 @@ static int i40e_config_netdev(struct i40e_vsi *vsi) np = netdev_priv(netdev); np->vsi = vsi; - netdev->hw_enc_features = NETIF_F_IP_CSUM | + netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_GSO_UDP_TUNNEL | - NETIF_F_TSO | - NETIF_F_SG; + NETIF_F_TSO; netdev->features = NETIF_F_SG | NETIF_F_IP_CSUM | @@ -6512,6 +6714,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi) NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_RXCSUM | + NETIF_F_NTUPLE | NETIF_F_RXHASH | 0; @@ -6771,8 +6974,6 @@ int i40e_vsi_release(struct i40e_vsi *vsi) if (vsi->netdev) { /* results in a call to i40e_close() */ unregister_netdev(vsi->netdev); - free_netdev(vsi->netdev); - vsi->netdev = NULL; } } else { if (!test_and_set_bit(__I40E_DOWN, &vsi->state)) @@ -6791,6 +6992,10 @@ int i40e_vsi_release(struct i40e_vsi *vsi) i40e_vsi_delete(vsi); i40e_vsi_free_q_vectors(vsi); + if (vsi->netdev) { + free_netdev(vsi->netdev); + vsi->netdev = NULL; + } i40e_vsi_clear_rings(vsi); i40e_vsi_clear(vsi); @@ -6845,13 +7050,12 @@ static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi) } if (vsi->base_vector) { - dev_info(&pf->pdev->dev, - "VSI %d has non-zero base vector %d\n", + dev_info(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n", vsi->seid, vsi->base_vector); return -EEXIST; } - ret = i40e_alloc_q_vectors(vsi); + ret = i40e_vsi_alloc_q_vectors(vsi); if (ret) { dev_info(&pf->pdev->dev, "failed to allocate %d q_vector for VSI %d, ret=%d\n", @@ -6865,7 +7069,7 @@ static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi) vsi->num_q_vectors, vsi->idx); if (vsi->base_vector < 0) { dev_info(&pf->pdev->dev, - "failed to get q tracking for VSI %d, err=%d\n", + "failed to get queue tracking for VSI %d, err=%d\n", vsi->seid, vsi->base_vector); i40e_vsi_free_q_vectors(vsi); ret = -ENOENT; @@ -7822,6 +8026,44 @@ static int i40e_setup_pf_filter_control(struct i40e_pf *pf) return 0; } +#define 
INFO_STRING_LEN 255 +static void i40e_print_features(struct i40e_pf *pf) +{ + struct i40e_hw *hw = &pf->hw; + char *buf, *string; + + string = kzalloc(INFO_STRING_LEN, GFP_KERNEL); + if (!string) { + dev_err(&pf->pdev->dev, "Features string allocation failed\n"); + return; + } + + buf = string; + + buf += sprintf(string, "Features: PF-id[%d] ", hw->pf_id); +#ifdef CONFIG_PCI_IOV + buf += sprintf(buf, "VFs: %d ", pf->num_req_vfs); +#endif + buf += sprintf(buf, "VSIs: %d QP: %d ", pf->hw.func_caps.num_vsis, + pf->vsi[pf->lan_vsi]->num_queue_pairs); + + if (pf->flags & I40E_FLAG_RSS_ENABLED) + buf += sprintf(buf, "RSS "); + buf += sprintf(buf, "FDir "); + if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) + buf += sprintf(buf, "ATR "); + if (pf->flags & I40E_FLAG_FD_SB_ENABLED) + buf += sprintf(buf, "NTUPLE "); + if (pf->flags & I40E_FLAG_DCB_ENABLED) + buf += sprintf(buf, "DCB "); + if (pf->flags & I40E_FLAG_PTP) + buf += sprintf(buf, "PTP "); + + BUG_ON(buf > (string + INFO_STRING_LEN)); + dev_info(&pf->pdev->dev, "%s\n", string); + kfree(string); +} + /** * i40e_probe - Device initialization routine * @pdev: PCI device information struct @@ -7848,17 +8090,14 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) return err; /* set up for high or low dma */ - if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) { - /* coherent mask for the same size will always succeed if - * dma_set_mask does - */ - dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); - } else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) { - dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); - } else { - dev_err(&pdev->dev, "DMA configuration failed: %d\n", err); - err = -EIO; - goto err_dma; + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (err) { + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (err) { + dev_err(&pdev->dev, + "DMA configuration failed: 0x%x\n", err); + goto err_dma; + } } /* set up pci connections */ @@ -7946,13 +8185,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) err = i40e_init_adminq(hw); dev_info(&pdev->dev, "%s\n", i40e_fw_version_str(hw)); - if (((hw->nvm.version & I40E_NVM_VERSION_HI_MASK) - >> I40E_NVM_VERSION_HI_SHIFT) != I40E_CURRENT_NVM_VERSION_HI) { - dev_info(&pdev->dev, - "warning: NVM version not supported, supported version: %02x.%02x\n", - I40E_CURRENT_NVM_VERSION_HI, - I40E_CURRENT_NVM_VERSION_LO); - } if (err) { dev_info(&pdev->dev, "init_adminq failed: %d expecting API %02x.%02x\n", @@ -7961,6 +8193,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_pf_reset; } + i40e_verify_eeprom(pf); + i40e_clear_pxe_mode(hw); err = i40e_get_capabilities(pf); if (err) @@ -8062,7 +8296,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* prep for VF support */ if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) && - (pf->flags & I40E_FLAG_MSIX_ENABLED)) { + (pf->flags & I40E_FLAG_MSIX_ENABLED) && + !test_bit(__I40E_BAD_EEPROM, &pf->state)) { u32 val; /* disable link interrupts for VFs */ @@ -8070,6 +8305,16 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK; wr32(hw, I40E_PFGEN_PORTMDIO_NUM, val); i40e_flush(hw); + + if (pci_num_vf(pdev)) { + dev_info(&pdev->dev, + "Active VFs found, allocating resources.\n"); + err = i40e_alloc_vfs(pf, pci_num_vf(pdev)); + if (err) + dev_info(&pdev->dev, + "Error %d allocating resources for existing VFs\n", + err); + } } 
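/* A note on the DMA setup earlier in this function: the old
 * dma_set_mask()/dma_set_coherent_mask() pair is folded into
 * dma_set_mask_and_coherent(). A minimal sketch of the same
 * 64-then-32-bit fallback for any PCI device (illustrative only,
 * not part of this patch):
 *
 *	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
 *	if (err)
 *		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
 *	if (err)
 *		goto err_dma;
 *
 * If neither width is usable, probe bails out before touching the device.
 */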
pfs_found++; @@ -8092,7 +8337,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) i40e_set_pci_config_data(hw, link_status); - dev_info(&pdev->dev, "PCI Express: %s %s\n", + dev_info(&pdev->dev, "PCI-Express: %s %s\n", (hw->bus.speed == i40e_bus_speed_8000 ? "Speed 8.0GT/s" : hw->bus.speed == i40e_bus_speed_5000 ? "Speed 5.0GT/s" : hw->bus.speed == i40e_bus_speed_2500 ? "Speed 2.5GT/s" : @@ -8109,6 +8354,9 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n"); } + /* print a string summarizing features */ + i40e_print_features(pf); + return 0; /* Unwind what we've done if something failed in the setup */ @@ -8165,16 +8413,16 @@ static void i40e_remove(struct pci_dev *pdev) i40e_ptp_stop(pf); - if (pf->flags & I40E_FLAG_SRIOV_ENABLED) { - i40e_free_vfs(pf); - pf->flags &= ~I40E_FLAG_SRIOV_ENABLED; - } - /* no more scheduling of any task */ set_bit(__I40E_DOWN, &pf->state); del_timer_sync(&pf->service_timer); cancel_work_sync(&pf->service_task); + if (pf->flags & I40E_FLAG_SRIOV_ENABLED) { + i40e_free_vfs(pf); + pf->flags &= ~I40E_FLAG_SRIOV_ENABLED; + } + i40e_fdir_teardown(pf); /* If there is a switch structure or any orphans, remove them. diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c index 73f95b081927..262bdf11d221 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c +++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c @@ -27,14 +27,14 @@ #include "i40e_prototype.h" /** - * i40e_init_nvm_ops - Initialize NVM function pointers. - * @hw: pointer to the HW structure. + * i40e_init_nvm_ops - Initialize NVM function pointers + * @hw: pointer to the HW structure * - * Setups the function pointers and the NVM info structure. Should be called - * once per NVM initialization, e.g. inside the i40e_init_shared_code(). - * Please notice that the NVM term is used here (& in all methods covered - * in this file) as an equivalent of the FLASH part mapped into the SR. - * We are accessing FLASH always thru the Shadow RAM. + * Setup the function pointers and the NVM info structure. Should be called + * once per NVM initialization, e.g. inside the i40e_init_shared_code(). + * Please notice that the NVM term is used here (& in all methods covered + * in this file) as an equivalent of the FLASH part mapped into the SR. + * We are accessing FLASH always thru the Shadow RAM. **/ i40e_status i40e_init_nvm(struct i40e_hw *hw) { @@ -49,16 +49,16 @@ i40e_status i40e_init_nvm(struct i40e_hw *hw) gens = rd32(hw, I40E_GLNVM_GENS); sr_size = ((gens & I40E_GLNVM_GENS_SR_SIZE_MASK) >> I40E_GLNVM_GENS_SR_SIZE_SHIFT); - /* Switching to words (sr_size contains power of 2KB). */ + /* Switching to words (sr_size contains power of 2KB) */ nvm->sr_size = (1 << sr_size) * I40E_SR_WORDS_IN_1KB; - /* Check if we are in the normal or blank NVM programming mode. */ + /* Check if we are in the normal or blank NVM programming mode */ fla = rd32(hw, I40E_GLNVM_FLA); - if (fla & I40E_GLNVM_FLA_LOCKED_MASK) { /* Normal programming mode. */ - /* Max NVM timeout. */ + if (fla & I40E_GLNVM_FLA_LOCKED_MASK) { /* Normal programming mode */ + /* Max NVM timeout */ nvm->timeout = I40E_MAX_NVM_TIMEOUT; nvm->blank_nvm_mode = false; - } else { /* Blank programming mode. 
*/ + } else { /* Blank programming mode */ nvm->blank_nvm_mode = true; ret_code = I40E_ERR_NVM_BLANK_MODE; hw_dbg(hw, "NVM init error: unsupported blank mode.\n"); @@ -68,12 +68,12 @@ i40e_status i40e_init_nvm(struct i40e_hw *hw) } /** - * i40e_acquire_nvm - Generic request for acquiring the NVM ownership. - * @hw: pointer to the HW structure. - * @access: NVM access type (read or write). + * i40e_acquire_nvm - Generic request for acquiring the NVM ownership + * @hw: pointer to the HW structure + * @access: NVM access type (read or write) * - * This function will request NVM ownership for reading - * via the proper Admin Command. + * This function will request NVM ownership for reading + * via the proper Admin Command. **/ i40e_status i40e_acquire_nvm(struct i40e_hw *hw, enum i40e_aq_resource_access_type access) @@ -87,20 +87,20 @@ i40e_status i40e_acquire_nvm(struct i40e_hw *hw, ret_code = i40e_aq_request_resource(hw, I40E_NVM_RESOURCE_ID, access, 0, &time, NULL); - /* Reading the Global Device Timer. */ + /* Reading the Global Device Timer */ gtime = rd32(hw, I40E_GLVFGEN_TIMER); - /* Store the timeout. */ + /* Store the timeout */ hw->nvm.hw_semaphore_timeout = I40E_MS_TO_GTIME(time) + gtime; if (ret_code) { - /* Set the polling timeout. */ + /* Set the polling timeout */ if (time > I40E_MAX_NVM_TIMEOUT) timeout = I40E_MS_TO_GTIME(I40E_MAX_NVM_TIMEOUT) + gtime; else timeout = hw->nvm.hw_semaphore_timeout; - /* Poll until the current NVM owner timeouts. */ + /* Poll until the current NVM owner timeouts */ while (gtime < timeout) { usleep_range(10000, 20000); ret_code = i40e_aq_request_resource(hw, @@ -128,10 +128,10 @@ i40e_i40e_acquire_nvm_exit: } /** - * i40e_release_nvm - Generic request for releasing the NVM ownership. - * @hw: pointer to the HW structure. + * i40e_release_nvm - Generic request for releasing the NVM ownership + * @hw: pointer to the HW structure * - * This function will release NVM resource via the proper Admin Command. + * This function will release NVM resource via the proper Admin Command. **/ void i40e_release_nvm(struct i40e_hw *hw) { @@ -140,17 +140,17 @@ void i40e_release_nvm(struct i40e_hw *hw) } /** - * i40e_poll_sr_srctl_done_bit - Polls the GLNVM_SRCTL done bit. - * @hw: pointer to the HW structure. + * i40e_poll_sr_srctl_done_bit - Polls the GLNVM_SRCTL done bit + * @hw: pointer to the HW structure * - * Polls the SRCTL Shadow RAM register done bit. + * Polls the SRCTL Shadow RAM register done bit. **/ static i40e_status i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw) { i40e_status ret_code = I40E_ERR_TIMEOUT; u32 srctl, wait_cnt; - /* Poll the I40E_GLNVM_SRCTL until the done bit is set. */ + /* Poll the I40E_GLNVM_SRCTL until the done bit is set */ for (wait_cnt = 0; wait_cnt < I40E_SRRD_SRCTL_ATTEMPTS; wait_cnt++) { srctl = rd32(hw, I40E_GLNVM_SRCTL); if (srctl & I40E_GLNVM_SRCTL_DONE_MASK) { @@ -165,12 +165,12 @@ static i40e_status i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw) } /** - * i40e_read_nvm_word - Reads Shadow RAM - * @hw: pointer to the HW structure. - * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF). - * @data: word read from the Shadow RAM. + * i40e_read_nvm_word - Reads Shadow RAM + * @hw: pointer to the HW structure + * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF) + * @data: word read from the Shadow RAM * - * Reads 16 bit word from the Shadow RAM using the GLNVM_SRCTL register. + * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register. 
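+ *
+ * A minimal caller-side sketch (illustrative only; the 0x2B offset is an
+ * arbitrary example), taking and releasing NVM ownership around the read:
+ *
+ *	u16 word;
+ *
+ *	if (!i40e_acquire_nvm(hw, I40E_RESOURCE_READ)) {
+ *		if (!i40e_read_nvm_word(hw, 0x2B, &word))
+ *			hw_dbg(hw, "SR word 0x2B = 0x%04x\n", word);
+ *		i40e_release_nvm(hw);
+ *	}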
**/ i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset, u16 *data) @@ -184,15 +184,15 @@ i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset, goto read_nvm_exit; } - /* Poll the done bit first. */ + /* Poll the done bit first */ ret_code = i40e_poll_sr_srctl_done_bit(hw); if (!ret_code) { - /* Write the address and start reading. */ + /* Write the address and start reading */ sr_reg = (u32)(offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) | (1 << I40E_GLNVM_SRCTL_START_SHIFT); wr32(hw, I40E_GLNVM_SRCTL, sr_reg); - /* Poll I40E_GLNVM_SRCTL until the done bit is set. */ + /* Poll I40E_GLNVM_SRCTL until the done bit is set */ ret_code = i40e_poll_sr_srctl_done_bit(hw); if (!ret_code) { sr_reg = rd32(hw, I40E_GLNVM_SRDATA); @@ -210,16 +210,15 @@ read_nvm_exit: } /** - * i40e_read_nvm_buffer - Reads Shadow RAM buffer. - * @hw: pointer to the HW structure. - * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF). - * @words: number of words to read (in) & - * number of words read before the NVM ownership timeout (out). - * @data: words read from the Shadow RAM. + * i40e_read_nvm_buffer - Reads Shadow RAM buffer + * @hw: pointer to the HW structure + * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF). + * @words: (in) number of words to read; (out) number of words actually read + * @data: words read from the Shadow RAM * - * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd() - * method. The buffer read is preceded by the NVM ownership take - * and followed by the release. + * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd() + * method. The buffer read is preceded by the NVM ownership take + * and followed by the release. **/ i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset, u16 *words, u16 *data) @@ -227,7 +226,7 @@ i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset, i40e_status ret_code = 0; u16 index, word; - /* Loop thru the selected region. */ + /* Loop thru the selected region */ for (word = 0; word < *words; word++) { index = offset + word; ret_code = i40e_read_nvm_word(hw, index, &data[word]); @@ -235,21 +234,21 @@ i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset, break; } - /* Update the number of words read from the Shadow RAM. */ + /* Update the number of words read from the Shadow RAM */ *words = word; return ret_code; } /** - * i40e_calc_nvm_checksum - Calculates and returns the checksum - * @hw: pointer to hardware structure - * @checksum: pointer to the checksum + * i40e_calc_nvm_checksum - Calculates and returns the checksum + * @hw: pointer to hardware structure + * @checksum: pointer to the checksum * - * This function calculate SW Checksum that covers the whole 64kB shadow RAM - * except the VPD and PCIe ALT Auto-load modules. The structure and size of VPD - * is customer specific and unknown. Therefore, this function skips all maximum - * possible size of VPD (1kB). + * This function calculates SW Checksum that covers the whole 64kB shadow RAM + * except the VPD and PCIe ALT Auto-load modules. The structure and size of VPD + * is customer specific and unknown. Therefore, this function skips all maximum + * possible size of VPD (1kB). 
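+ *
+ * In simplified form (a sketch that also ignores the skipped ranges and
+ * the checksum word itself):
+ *
+ *	u16 sum = 0, word;
+ *	u32 i;
+ *
+ *	for (i = 0; i < hw->nvm.sr_size; i++)
+ *		if (!i40e_read_nvm_word(hw, (u16)i, &word))
+ *			sum += word;
+ *	*checksum = (u16)(I40E_SR_SW_CHECKSUM_BASE - sum);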
**/ static i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw, u16 *checksum) @@ -311,12 +310,12 @@ i40e_calc_nvm_checksum_exit: } /** - * i40e_validate_nvm_checksum - Validate EEPROM checksum - * @hw: pointer to hardware structure - * @checksum: calculated checksum + * i40e_validate_nvm_checksum - Validate EEPROM checksum + * @hw: pointer to hardware structure + * @checksum: calculated checksum * - * Performs checksum calculation and validates the NVM SW checksum. If the - * caller does not need checksum, the value can be NULL. + * Performs checksum calculation and validates the NVM SW checksum. If the + * caller does not need checksum, the value can be NULL. **/ i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw, u16 *checksum) diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h index ed91f93ede2b..9cd57e617959 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h +++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h @@ -231,6 +231,13 @@ i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw, u16 *checksum); void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status); +extern struct i40e_rx_ptype_decoded i40e_ptype_lookup[]; + +static inline struct i40e_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype) +{ + return i40e_ptype_lookup[ptype]; +} + /* prototype for functions used for SW locks */ /* i40e_common for VF drivers*/ diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index d4bb482b1a7f..0f5d96ad281d 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -25,6 +25,7 @@ ******************************************************************************/ #include "i40e.h" +#include "i40e_prototype.h" static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size, u32 td_tag) @@ -39,11 +40,12 @@ static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size, #define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS) /** * i40e_program_fdir_filter - Program a Flow Director filter - * @fdir_input: Packet data that will be filter parameters + * @fdir_data: Packet data that will be filter parameters + * @raw_packet: the pre-allocated packet buffer for FDir * @pf: The pf pointer * @add: True for add/update, False for remove **/ -int i40e_program_fdir_filter(struct i40e_fdir_data *fdir_data, +int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet, struct i40e_pf *pf, bool add) { struct i40e_filter_program_desc *fdir_desc; @@ -68,8 +70,8 @@ int i40e_program_fdir_filter(struct i40e_fdir_data *fdir_data, tx_ring = vsi->tx_rings[0]; dev = tx_ring->dev; - dma = dma_map_single(dev, fdir_data->raw_packet, - I40E_FDIR_MAX_RAW_PACKET_LOOKUP, DMA_TO_DEVICE); + dma = dma_map_single(dev, raw_packet, + I40E_FDIR_MAX_RAW_PACKET_SIZE, DMA_TO_DEVICE); if (dma_mapping_error(dev, dma)) goto dma_fail; @@ -132,14 +134,14 @@ int i40e_program_fdir_filter(struct i40e_fdir_data *fdir_data, tx_ring->next_to_use = (i + 1 < tx_ring->count) ? 
i + 1 : 0; /* record length, and DMA address */ - dma_unmap_len_set(tx_buf, len, I40E_FDIR_MAX_RAW_PACKET_LOOKUP); + dma_unmap_len_set(tx_buf, len, I40E_FDIR_MAX_RAW_PACKET_SIZE); dma_unmap_addr_set(tx_buf, dma, dma); tx_desc->buffer_addr = cpu_to_le64(dma); td_cmd = I40E_TXD_CMD | I40E_TX_DESC_CMD_DUMMY; tx_desc->cmd_type_offset_bsz = - build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_LOOKUP, 0); + build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_SIZE, 0); /* set the timestamp */ tx_buf->time_stamp = jiffies; @@ -161,26 +163,329 @@ dma_fail: return -1; } +#define IP_HEADER_OFFSET 14 +#define I40E_UDPIP_DUMMY_PACKET_LEN 42 +/** + * i40e_add_del_fdir_udpv4 - Add/Remove UDPv4 filters + * @vsi: pointer to the targeted VSI + * @fd_data: the flow director data required for the FDir descriptor + * @raw_packet: the pre-allocated packet buffer for FDir + * @add: true adds a filter, false removes it + * + * Returns 0 if the filters were successfully added or removed + **/ +static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi, + struct i40e_fdir_filter *fd_data, + u8 *raw_packet, bool add) +{ + struct i40e_pf *pf = vsi->back; + struct udphdr *udp; + struct iphdr *ip; + bool err = false; + int ret; + int i; + static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0, + 0x45, 0, 0, 0x1c, 0, 0, 0x40, 0, 0x40, 0x11, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; + + memcpy(raw_packet, packet, I40E_UDPIP_DUMMY_PACKET_LEN); + + ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET); + udp = (struct udphdr *)(raw_packet + IP_HEADER_OFFSET + + sizeof(struct iphdr)); + + ip->daddr = fd_data->dst_ip[0]; + udp->dest = fd_data->dst_port; + ip->saddr = fd_data->src_ip[0]; + udp->source = fd_data->src_port; + + for (i = I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP; + i <= I40E_FILTER_PCTYPE_NONF_IPV4_UDP; i++) { + fd_data->pctype = i; + ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add); + + if (ret) { + dev_info(&pf->pdev->dev, + "Filter command send failed for PCTYPE %d (ret = %d)\n", + fd_data->pctype, ret); + err = true; + } else { + dev_info(&pf->pdev->dev, + "Filter OK for PCTYPE %d (ret = %d)\n", + fd_data->pctype, ret); + } + } + + return err ? 
-EOPNOTSUPP : 0; +} + +#define I40E_TCPIP_DUMMY_PACKET_LEN 54 +/** + * i40e_add_del_fdir_tcpv4 - Add/Remove TCPv4 filters + * @vsi: pointer to the targeted VSI + * @fd_data: the flow director data required for the FDir descriptor + * @raw_packet: the pre-allocated packet buffer for FDir + * @add: true adds a filter, false removes it + * + * Returns 0 if the filters were successfully added or removed + **/ +static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi, + struct i40e_fdir_filter *fd_data, + u8 *raw_packet, bool add) +{ + struct i40e_pf *pf = vsi->back; + struct tcphdr *tcp; + struct iphdr *ip; + bool err = false; + int ret; + /* Dummy packet */ + static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0, + 0x45, 0, 0, 0x28, 0, 0, 0x40, 0, 0x40, 0x6, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x80, 0x11, + 0x0, 0x72, 0, 0, 0, 0}; + + memcpy(raw_packet, packet, I40E_TCPIP_DUMMY_PACKET_LEN); + + ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET); + tcp = (struct tcphdr *)(raw_packet + IP_HEADER_OFFSET + + sizeof(struct iphdr)); + + ip->daddr = fd_data->dst_ip[0]; + tcp->dest = fd_data->dst_port; + ip->saddr = fd_data->src_ip[0]; + tcp->source = fd_data->src_port; + + if (add) { + if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) { + dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n"); + pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED; + } + } + + fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN; + ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add); + + if (ret) { + dev_info(&pf->pdev->dev, + "Filter command send failed for PCTYPE %d (ret = %d)\n", + fd_data->pctype, ret); + err = true; + } else { + dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d (ret = %d)\n", + fd_data->pctype, ret); + } + + fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP; + + ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add); + if (ret) { + dev_info(&pf->pdev->dev, + "Filter command send failed for PCTYPE %d (ret = %d)\n", + fd_data->pctype, ret); + err = true; + } else { + dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d (ret = %d)\n", + fd_data->pctype, ret); + } + + return err ? 
-EOPNOTSUPP : 0; +} + +/** + * i40e_add_del_fdir_sctpv4 - Add/Remove SCTPv4 Flow Director filters for + * a specific flow spec + * @vsi: pointer to the targeted VSI + * @fd_data: the flow director data required for the FDir descriptor + * @raw_packet: the pre-allocated packet buffer for FDir + * @add: true adds a filter, false removes it + * + * Always returns -EOPNOTSUPP + **/ +static int i40e_add_del_fdir_sctpv4(struct i40e_vsi *vsi, + struct i40e_fdir_filter *fd_data, + u8 *raw_packet, bool add) +{ + return -EOPNOTSUPP; +} + +#define I40E_IP_DUMMY_PACKET_LEN 34 +/** + * i40e_add_del_fdir_ipv4 - Add/Remove IPv4 Flow Director filters for + * a specific flow spec + * @vsi: pointer to the targeted VSI + * @fd_data: the flow director data required for the FDir descriptor + * @raw_packet: the pre-allocated packet buffer for FDir + * @add: true adds a filter, false removes it + * + * Returns 0 if the filters were successfully added or removed + **/ +static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi, + struct i40e_fdir_filter *fd_data, + u8 *raw_packet, bool add) +{ + struct i40e_pf *pf = vsi->back; + struct iphdr *ip; + bool err = false; + int ret; + int i; + static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0, + 0x45, 0, 0, 0x14, 0, 0, 0x40, 0, 0x40, 0x10, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0}; + + memcpy(raw_packet, packet, I40E_IP_DUMMY_PACKET_LEN); + ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET); + + ip->saddr = fd_data->src_ip[0]; + ip->daddr = fd_data->dst_ip[0]; + ip->protocol = 0; + + for (i = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER; + i <= I40E_FILTER_PCTYPE_FRAG_IPV4; i++) { + fd_data->pctype = i; + ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add); + + if (ret) { + dev_info(&pf->pdev->dev, + "Filter command send failed for PCTYPE %d (ret = %d)\n", + fd_data->pctype, ret); + err = true; + } else { + dev_info(&pf->pdev->dev, + "Filter OK for PCTYPE %d (ret = %d)\n", + fd_data->pctype, ret); + } + } + + return err ? 
-EOPNOTSUPP : 0; +} + +/** + * i40e_add_del_fdir - Build raw packets to add/del fdir filter + * @vsi: pointer to the targeted VSI + * @input: the Flow Director filter spec to add or remove + * @add: true adds a filter, false removes it + * + **/ +int i40e_add_del_fdir(struct i40e_vsi *vsi, + struct i40e_fdir_filter *input, bool add) +{ + struct i40e_pf *pf = vsi->back; + u8 *raw_packet; + int ret; + + /* Allocate the raw packet buffer that the per-flow-type helpers + * below populate for the programming descriptor + */ + raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL); + if (!raw_packet) + return -ENOMEM; + + switch (input->flow_type & ~FLOW_EXT) { + case TCP_V4_FLOW: + ret = i40e_add_del_fdir_tcpv4(vsi, input, raw_packet, + add); + break; + case UDP_V4_FLOW: + ret = i40e_add_del_fdir_udpv4(vsi, input, raw_packet, + add); + break; + case SCTP_V4_FLOW: + ret = i40e_add_del_fdir_sctpv4(vsi, input, raw_packet, + add); + break; + case IPV4_FLOW: + ret = i40e_add_del_fdir_ipv4(vsi, input, raw_packet, + add); + break; + case IP_USER_FLOW: + switch (input->ip4_proto) { + case IPPROTO_TCP: + ret = i40e_add_del_fdir_tcpv4(vsi, input, + raw_packet, add); + break; + case IPPROTO_UDP: + ret = i40e_add_del_fdir_udpv4(vsi, input, + raw_packet, add); + break; + case IPPROTO_SCTP: + ret = i40e_add_del_fdir_sctpv4(vsi, input, + raw_packet, add); + break; + default: + ret = i40e_add_del_fdir_ipv4(vsi, input, + raw_packet, add); + break; + } + break; + default: + dev_info(&pf->pdev->dev, "Could not specify spec type %d\n", + input->flow_type); + ret = -EINVAL; + } + + kfree(raw_packet); + return ret; +} + /** * i40e_fd_handle_status - check the Programming Status for FD * @rx_ring: the Rx ring for this descriptor - * @qw: the descriptor data + * @rx_desc: the Rx descriptor for the programming status, not a packet descriptor * @prog_id: the id originally used for programming * * This is used to verify if the FD programming or invalidation * requested by SW to the HW is successful or not and take actions accordingly. **/ -static void i40e_fd_handle_status(struct i40e_ring *rx_ring, u32 qw, u8 prog_id) +static void i40e_fd_handle_status(struct i40e_ring *rx_ring, + union i40e_rx_desc *rx_desc, u8 prog_id) { - struct pci_dev *pdev = rx_ring->vsi->back->pdev; + struct i40e_pf *pf = rx_ring->vsi->back; + struct pci_dev *pdev = pf->pdev; + u32 fcnt_prog, fcnt_avail; u32 error; + u64 qw; + qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len); error = (qw & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >> I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT; - /* for now just print the Status */ - dev_info(&pdev->dev, "FD programming id %02x, Status %08x\n", - prog_id, error); + if (error == (0x1 << I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) { + dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n", + rx_desc->wb.qword0.hi_dword.fd_id); + + /* filter programming failed most likely due to table full */ + fcnt_prog = i40e_get_current_fd_count(pf); + fcnt_avail = pf->hw.fdir_shared_filter_count + + pf->fdir_pf_filter_count; + + /* If ATR is running, fcnt_prog can quickly change; if we are + * very close to full, it makes sense to disable FD ATR/SB and + * then re-enable them when there is room. 
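+ *
+ * The re-enable side is not part of this hunk; as a sketch, it would
+ * be the mirror image, run once occupancy drops back below the margin
+ * (using the same flag convention as the code below):
+ *
+ *	if (fcnt_prog < fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN &&
+ *	    (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) {
+ *		pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
+ *		pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
+ *	}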
+ */ + if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) { + /* Turn off ATR first */ + if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) { + pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED; + dev_warn(&pdev->dev, "FD filter space full, ATR for further flows will be turned off\n"); + pf->auto_disable_flags |= + I40E_FLAG_FD_ATR_ENABLED; + pf->flags |= I40E_FLAG_FDIR_REQUIRES_REINIT; + } else if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { + pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; + dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n"); + pf->auto_disable_flags |= + I40E_FLAG_FD_SB_ENABLED; + pf->flags |= I40E_FLAG_FDIR_REQUIRES_REINIT; + } + } else { + dev_info(&pdev->dev, "FD filter programming error\n"); + } + } else if (error == + (0x1 << I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) { + if (I40E_DEBUG_FD & pf->hw.debug_mask) + dev_info(&pdev->dev, "ntuple filter loc = %d, could not be removed\n", + rx_desc->wb.qword0.hi_dword.fd_id); + } } /** @@ -315,6 +620,20 @@ static bool i40e_check_tx_hang(struct i40e_ring *tx_ring) } /** + * i40e_get_head - Retrieve head from head writeback + * @tx_ring: tx ring to fetch head of + * + * Returns value of Tx ring head based on value stored + * in head write-back location + **/ +static inline u32 i40e_get_head(struct i40e_ring *tx_ring) +{ + void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count; + + return le32_to_cpu(*(volatile __le32 *)head); +} + +/** * i40e_clean_tx_irq - Reclaim resources after transmit completes * @tx_ring: tx ring to clean * @budget: how many cleans we're allowed @@ -325,6 +644,7 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget) { u16 i = tx_ring->next_to_clean; struct i40e_tx_buffer *tx_buf; + struct i40e_tx_desc *tx_head; struct i40e_tx_desc *tx_desc; unsigned int total_packets = 0; unsigned int total_bytes = 0; @@ -333,6 +653,8 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget) tx_desc = I40E_TX_DESC(tx_ring, i); i -= tx_ring->count; + tx_head = I40E_TX_DESC(tx_ring, i40e_get_head(tx_ring)); + do { struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch; @@ -343,9 +665,8 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget) /* prevent any other reads prior to eop_desc */ read_barrier_depends(); - /* if the descriptor isn't done, no work yet to do */ - if (!(eop_desc->cmd_type_offset_bsz & - cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE))) + /* we have caught up to head, no work left to do */ + if (tx_head == tx_desc) break; /* clear next_to_watch to prevent false hangs */ @@ -577,7 +898,7 @@ static void i40e_clean_programming_status(struct i40e_ring *rx_ring, I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT; if (id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS) - i40e_fd_handle_status(rx_ring, qw, id); + i40e_fd_handle_status(rx_ring, rx_desc, id); } /** @@ -601,6 +922,10 @@ int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring) /* round up to nearest 4K */ tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc); + /* add a u32 for head writeback; the ALIGN below then guarantees + * the extra space is at least one cache line in size + */ + tx_ring->size += sizeof(u32); tx_ring->size = ALIGN(tx_ring->size, 4096); tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, &tx_ring->dma, GFP_KERNEL); @@ -892,7 +1217,7 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi, rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT))) return; - /* likely incorrect csum if alternate IP extention headers found */ + /* likely incorrect csum if alternate IP 
extension headers found */ if (rx_status & (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT)) return; @@ -956,6 +1281,29 @@ static inline u32 i40e_rx_hash(struct i40e_ring *ring, } /** + * i40e_ptype_to_hash - get a hash type + * @ptype: the ptype value from the descriptor + * + * Returns a hash type to be used by skb_set_hash + **/ +static inline enum pkt_hash_types i40e_ptype_to_hash(u8 ptype) +{ + struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype); + + if (!decoded.known) + return PKT_HASH_TYPE_NONE; + + if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP && + decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4) + return PKT_HASH_TYPE_L4; + else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP && + decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3) + return PKT_HASH_TYPE_L3; + else + return PKT_HASH_TYPE_L2; +} + +/** * i40e_clean_rx_irq - Reclaim resources after receive completes * @rx_ring: rx ring to clean * @budget: how many cleans we're allowed @@ -972,8 +1320,11 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) u16 i = rx_ring->next_to_clean; union i40e_rx_desc *rx_desc; u32 rx_error, rx_status; + u8 rx_ptype; u64 qword; - u16 rx_ptype; + + if (budget <= 0) + return 0; rx_desc = I40E_RX_DESC(rx_ring, i); qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); @@ -1087,7 +1438,8 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) goto next_desc; } - skb->rxhash = i40e_rx_hash(rx_ring, rx_desc); + skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc), + i40e_ptype_to_hash(rx_ptype)); if (unlikely(rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK)) { i40e_ptp_rx_hwtstamp(vsi->back, skb, (rx_status & I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >> @@ -1246,8 +1598,6 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb, if (!tx_ring->atr_sample_rate) return; - tx_ring->atr_count++; - /* snag network header to get L4 type and address */ hdr.network = skb_network_header(skb); @@ -1269,8 +1619,17 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb, th = (struct tcphdr *)(hdr.network + hlen); - /* sample on all syn/fin packets or once every atr sample rate */ - if (!th->fin && !th->syn && (tx_ring->atr_count < tx_ring->atr_sample_rate)) + /* Due to lack of space, no more new filters can be programmed */ + if (th->syn && (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) + return; + + tx_ring->atr_count++; + + /* sample on all syn/fin/rst packets or once every atr sample rate */ + if (!th->fin && + !th->syn && + !th->rst && + (tx_ring->atr_count < tx_ring->atr_sample_rate)) return; tx_ring->atr_count = 0; @@ -1294,7 +1653,7 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb, dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG; - dtype_cmd |= th->fin ? + dtype_cmd |= (th->fin || th->rst) ? 
(I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE << I40E_TXD_FLTR_QW1_PCMD_SHIFT) : (I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE << @@ -1596,7 +1955,8 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring, struct i40e_tx_context_desc *context_desc; int i = tx_ring->next_to_use; - if (!cd_type_cmd_tso_mss && !cd_tunneling && !cd_l2tag2) + if ((cd_type_cmd_tso_mss == I40E_TX_DESC_DTYPE_CONTEXT) && + !cd_tunneling && !cd_l2tag2) return; /* grab the next descriptor */ @@ -1707,9 +2067,23 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, tx_bi = &tx_ring->tx_bi[i]; } - tx_desc->cmd_type_offset_bsz = - build_ctob(td_cmd, td_offset, size, td_tag) | - cpu_to_le64((u64)I40E_TXD_CMD << I40E_TXD_QW1_CMD_SHIFT); + /* Place RS bit on last descriptor of any packet that spans across the + * 4th descriptor (WB_STRIDE aka 0x3) in a 64B cacheline. + */ +#define WB_STRIDE 0x3 + if (((i & WB_STRIDE) != WB_STRIDE) && + (first <= &tx_ring->tx_bi[i]) && + (first >= &tx_ring->tx_bi[i & ~WB_STRIDE])) { + tx_desc->cmd_type_offset_bsz = + build_ctob(td_cmd, td_offset, size, td_tag) | + cpu_to_le64((u64)I40E_TX_DESC_CMD_EOP << + I40E_TXD_QW1_CMD_SHIFT); + } else { + tx_desc->cmd_type_offset_bsz = + build_ctob(td_cmd, td_offset, size, td_tag) | + cpu_to_le64((u64)I40E_TXD_CMD << + I40E_TXD_QW1_CMD_SHIFT); + } netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index), @@ -1812,7 +2186,7 @@ static int i40e_xmit_descriptor_count(struct sk_buff *skb, /* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD, * + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD, - * + 2 desc gap to keep tail from touching head, + * + 4 desc gap to avoid the cache line where head is, * + 1 desc for context descriptor, * otherwise try next time */ @@ -1823,7 +2197,7 @@ static int i40e_xmit_descriptor_count(struct sk_buff *skb, count += skb_shinfo(skb)->nr_frags; #endif count += TXD_USE_COUNT(skb_headlen(skb)); - if (i40e_maybe_stop_tx(tx_ring, count + 3)) { + if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) { tx_ring->tx_stats.tx_busy++; return 0; } diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h index 181a825d3160..71a968fe557f 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_type.h +++ b/drivers/net/ethernet/intel/i40e/i40e_type.h @@ -91,6 +91,7 @@ enum i40e_debug_mask { I40E_DEBUG_FLOW = 0x00000200, I40E_DEBUG_DCB = 0x00000400, I40E_DEBUG_DIAG = 0x00000800, + I40E_DEBUG_FD = 0x00001000, I40E_DEBUG_AQ_MESSAGE = 0x01000000, I40E_DEBUG_AQ_DESCRIPTOR = 0x02000000, @@ -458,6 +459,10 @@ union i40e_32byte_rx_desc { union { __le32 rss; /* RSS Hash */ __le32 fcoe_param; /* FCoE DDP Context id */ + /* Flow director filter id in case of + * Programming status desc WB + */ + __le32 fd_id; } hi_dword; } qword0; struct { @@ -698,7 +703,7 @@ enum i40e_rx_prog_status_desc_prog_id_masks { enum i40e_rx_prog_status_desc_error_bits { /* Note: These are predefined bit offsets */ I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT = 0, - I40E_RX_PROG_STATUS_DESC_NO_FD_QUOTA_SHIFT = 1, + I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT = 1, I40E_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT = 2, I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT = 3 }; @@ -1010,6 +1015,11 @@ struct i40e_hw_port_stats { u64 tx_size_big; /* ptc9522 */ u64 mac_short_packet_dropped; /* mspdc */ u64 checksum_error; /* xec */ + /* EEE LPI */ + bool tx_lpi_status; + bool rx_lpi_status; + u64 tx_lpi_count; /* etlpic */ + u64 rx_lpi_count; /* erlpic */ }; /* Checksum and Shadow RAM pointers */ diff --git 
a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index b9d1c1c8ca5a..02c11a7f7d29 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -69,7 +69,7 @@ static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u8 vector_id) { struct i40e_pf *pf = vf->pf; - return vector_id <= pf->hw.func_caps.num_msix_vectors_vf; + return vector_id < pf->hw.func_caps.num_msix_vectors_vf; } /***********************vf resource mgmt routines*****************/ @@ -126,8 +126,8 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_idx, reg_idx = I40E_VPINT_LNKLST0(vf->vf_id); else reg_idx = I40E_VPINT_LNKLSTN( - (pf->hw.func_caps.num_msix_vectors_vf - * vf->vf_id) + (vector_id - 1)); + ((pf->hw.func_caps.num_msix_vectors_vf - 1) * vf->vf_id) + + (vector_id - 1)); if (vecmap->rxq_map == 0 && vecmap->txq_map == 0) { /* Special case - No queues mapped on this vector */ @@ -230,6 +230,9 @@ static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_idx, tx_ctx.qlen = info->ring_len; tx_ctx.rdylist = le16_to_cpu(pf->vsi[vsi_idx]->info.qs_handle[0]); tx_ctx.rdylist_act = 0; + tx_ctx.head_wb_ena = 1; + tx_ctx.head_wb_addr = info->dma_ring_addr + + (info->ring_len * sizeof(struct i40e_tx_desc)); /* clear the context in the HMC */ ret = i40e_clear_lan_tx_queue_context(hw, pf_queue_id); @@ -408,18 +411,10 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type) "Could not allocate VF broadcast filter\n"); } - if (!f) { - dev_err(&pf->pdev->dev, "Unable to add ucast filter\n"); - ret = -ENOMEM; - goto error_alloc_vsi_res; - } - /* program mac filter */ ret = i40e_sync_vsi_filters(vsi); - if (ret) { + if (ret) dev_err(&pf->pdev->dev, "Unable to program ucast filters\n"); - goto error_alloc_vsi_res; - } error_alloc_vsi_res: return ret; @@ -514,7 +509,8 @@ static void i40e_free_vf_res(struct i40e_vf *vf) vf->lan_vsi_index = 0; vf->lan_vsi_id = 0; } - msix_vf = pf->hw.func_caps.num_msix_vectors_vf + 1; + msix_vf = pf->hw.func_caps.num_msix_vectors_vf; + /* disable interrupts so the VF starts in a known state */ for (i = 0; i < msix_vf; i++) { /* format is same for both registers */ @@ -679,9 +675,9 @@ void i40e_reset_vf(struct i40e_vf *vf, bool flr) complete_reset: /* reallocate vf resources to reset the VSI state */ i40e_free_vf_res(vf); - mdelay(10); i40e_alloc_vf_res(vf); i40e_enable_vf_mappings(vf); + set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states); /* tell the VF the reset is done */ wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_VFACTIVE); @@ -847,7 +843,7 @@ void i40e_free_vfs(struct i40e_pf *pf) * * allocate vf resources **/ -static int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs) +int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs) { struct i40e_vf *vfs; int i, ret = 0; @@ -855,16 +851,18 @@ static int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs) /* Disable interrupt 0 so we don't try to handle the VFLR. 
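 *
 * (One allocation change below is worth noting: kzalloc(n * size) becomes
 * kcalloc(n, size). As a general sketch,
 *
 *	vfs = kzalloc(num_alloc_vfs * sizeof(*vfs), GFP_KERNEL);
 *
 * can overflow the multiplication for large counts, while
 *
 *	vfs = kcalloc(num_alloc_vfs, sizeof(*vfs), GFP_KERNEL);
 *
 * is overflow-checked with the same zeroed-memory semantics.)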
*/ i40e_irq_dynamic_disable_icr0(pf); - ret = pci_enable_sriov(pf->pdev, num_alloc_vfs); - if (ret) { - dev_err(&pf->pdev->dev, - "pci_enable_sriov failed with error %d!\n", ret); - pf->num_alloc_vfs = 0; - goto err_iov; + /* Check to see if we're just allocating resources for extant VFs */ + if (pci_num_vf(pf->pdev) != num_alloc_vfs) { + ret = pci_enable_sriov(pf->pdev, num_alloc_vfs); + if (ret) { + dev_err(&pf->pdev->dev, + "Failed to enable SR-IOV, error %d.\n", ret); + pf->num_alloc_vfs = 0; + goto err_iov; + } } - /* allocate memory */ - vfs = kzalloc(num_alloc_vfs * sizeof(struct i40e_vf), GFP_KERNEL); + vfs = kcalloc(num_alloc_vfs, sizeof(struct i40e_vf), GFP_KERNEL); if (!vfs) { ret = -ENOMEM; goto err_alloc; @@ -1776,7 +1774,7 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen) { struct i40e_hw *hw = &pf->hw; - int local_vf_id = vf_id - hw->func_caps.vf_base_id; + unsigned int local_vf_id = vf_id - hw->func_caps.vf_base_id; struct i40e_vf *vf; int ret; @@ -1873,7 +1871,8 @@ int i40e_vc_process_vflr_event(struct i40e_pf *pf) /* clear the bit in GLGEN_VFLRSTAT */ wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), (1 << bit_idx)); - i40e_reset_vf(vf, true); + if (!test_bit(__I40E_DOWN, &pf->state)) + i40e_reset_vf(vf, true); } } @@ -1924,15 +1923,28 @@ static void i40e_vc_vf_broadcast(struct i40e_pf *pf, void i40e_vc_notify_link_state(struct i40e_pf *pf) { struct i40e_virtchnl_pf_event pfe; + struct i40e_hw *hw = &pf->hw; + struct i40e_vf *vf = pf->vf; + struct i40e_link_status *ls = &pf->hw.phy.link_info; + int i; pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE; pfe.severity = I40E_PF_EVENT_SEVERITY_INFO; - pfe.event_data.link_event.link_status = - pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP; - pfe.event_data.link_event.link_speed = pf->hw.phy.link_info.link_speed; - - i40e_vc_vf_broadcast(pf, I40E_VIRTCHNL_OP_EVENT, I40E_SUCCESS, - (u8 *)&pfe, sizeof(struct i40e_virtchnl_pf_event)); + for (i = 0; i < pf->num_alloc_vfs; i++) { + if (vf->link_forced) { + pfe.event_data.link_event.link_status = vf->link_up; + pfe.event_data.link_event.link_speed = + (vf->link_up ? 
I40E_LINK_SPEED_40GB : 0); + } else { + pfe.event_data.link_event.link_status = + ls->link_info & I40E_AQ_LINK_UP; + pfe.event_data.link_event.link_speed = ls->link_speed; + } + i40e_aq_send_msg_to_vf(hw, vf->vf_id, I40E_VIRTCHNL_OP_EVENT, + 0, (u8 *)&pfe, sizeof(pfe), + NULL); + vf++; + } } /** @@ -2197,3 +2209,64 @@ int i40e_ndo_get_vf_config(struct net_device *netdev, error_param: return ret; } + +/** + * i40e_ndo_set_vf_link_state + * @netdev: network interface device structure + * @vf_id: vf identifier + * @link: required link state + * + * Set the link state of a specified VF, regardless of physical link state + **/ +int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_pf *pf = np->vsi->back; + struct i40e_virtchnl_pf_event pfe; + struct i40e_hw *hw = &pf->hw; + struct i40e_vf *vf; + int ret = 0; + + /* validate the request */ + if (vf_id >= pf->num_alloc_vfs) { + dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id); + ret = -EINVAL; + goto error_out; + } + + vf = &pf->vf[vf_id]; + + pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE; + pfe.severity = I40E_PF_EVENT_SEVERITY_INFO; + + switch (link) { + case IFLA_VF_LINK_STATE_AUTO: + vf->link_forced = false; + pfe.event_data.link_event.link_status = + pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP; + pfe.event_data.link_event.link_speed = + pf->hw.phy.link_info.link_speed; + break; + case IFLA_VF_LINK_STATE_ENABLE: + vf->link_forced = true; + vf->link_up = true; + pfe.event_data.link_event.link_status = true; + pfe.event_data.link_event.link_speed = I40E_LINK_SPEED_40GB; + break; + case IFLA_VF_LINK_STATE_DISABLE: + vf->link_forced = true; + vf->link_up = false; + pfe.event_data.link_event.link_status = false; + pfe.event_data.link_event.link_speed = 0; + break; + default: + ret = -EINVAL; + goto error_out; + } + /* Notify the VF of its new link state */ + i40e_aq_send_msg_to_vf(hw, vf->vf_id, I40E_VIRTCHNL_OP_EVENT, + 0, (u8 *)&pfe, sizeof(pfe), NULL); + +error_out: + return ret; +} diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h index cc1feee36e12..389c47f396d5 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h @@ -98,10 +98,13 @@ struct i40e_vf { unsigned long vf_caps; /* vf's adv. 
capabilities */ unsigned long vf_states; /* vf's runtime states */ + bool link_forced; + bool link_up; /* only valid if vf link is forced */ }; void i40e_free_vfs(struct i40e_pf *pf); int i40e_pci_sriov_configure(struct pci_dev *dev, int num_vfs); +int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs); int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen); int i40e_vc_process_vflr_event(struct i40e_pf *pf); @@ -115,6 +118,8 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int tx_rate); int i40e_ndo_get_vf_config(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi); +int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link); + void i40e_vc_notify_link_state(struct i40e_pf *pf); void i40e_vc_notify_reset(struct i40e_pf *pf); diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h index f7cea1bca38d..97662b6bd98a 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h @@ -1229,7 +1229,7 @@ struct i40e_aqc_add_remove_cloud_filters_element_data { #define I40E_AQC_ADD_CLOUD_TNL_TYPE_NGE 2 #define I40E_AQC_ADD_CLOUD_TNL_TYPE_IP 3 - __le32 tenant_id ; + __le32 tenant_id; u8 reserved[4]; __le16 queue_number; #define I40E_AQC_ADD_CLOUD_QUEUE_SHIFT 0 diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c index 7b13953b28c4..ae084378faab 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_common.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c @@ -160,6 +160,372 @@ i40e_status i40evf_aq_queue_shutdown(struct i40e_hw *hw, } +/* The i40evf_ptype_lookup table is used to convert from the 8-bit ptype in the + * hardware to a bit-field that can be used by SW to more easily determine the + * packet type. + * + * Macros are used to shorten the table lines and make this table human + * readable. + * + * We store the PTYPE in the top byte of the bit field - this is just so that + * we can check that the table doesn't have a row missing, as the index into + * the table should be the PTYPE. 
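+ *
+ * A decode sketch on the VF RX path (illustrative; it mirrors the PF
+ * driver's i40e_ptype_to_hash() added in i40e_txrx.c earlier in this
+ * diff):
+ *
+ *	struct i40e_rx_ptype_decoded d = i40evf_ptype_lookup[ptype];
+ *
+ *	if (d.known && d.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
+ *	    d.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4)
+ *		skb_set_hash(skb, hash, PKT_HASH_TYPE_L4);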
+ * + * Typical work flow: + * + * IF NOT i40evf_ptype_lookup[ptype].known + * THEN + * Packet is unknown + * ELSE IF i40evf_ptype_lookup[ptype].outer_ip == I40E_RX_PTYPE_OUTER_IP + * Use the rest of the fields to look at the tunnels, inner protocols, etc + * ELSE + * Use the enum i40e_rx_l2_ptype to decode the packet type + * ENDIF + */ + +/* macro to make the table lines short */ +#define I40E_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\ + { PTYPE, \ + 1, \ + I40E_RX_PTYPE_OUTER_##OUTER_IP, \ + I40E_RX_PTYPE_OUTER_##OUTER_IP_VER, \ + I40E_RX_PTYPE_##OUTER_FRAG, \ + I40E_RX_PTYPE_TUNNEL_##T, \ + I40E_RX_PTYPE_TUNNEL_END_##TE, \ + I40E_RX_PTYPE_##TEF, \ + I40E_RX_PTYPE_INNER_PROT_##I, \ + I40E_RX_PTYPE_PAYLOAD_LAYER_##PL } + +#define I40E_PTT_UNUSED_ENTRY(PTYPE) \ + { PTYPE, 0, 0, 0, 0, 0, 0, 0, 0, 0 } + +/* shorter macros makes the table fit but are terse */ +#define I40E_RX_PTYPE_NOF I40E_RX_PTYPE_NOT_FRAG +#define I40E_RX_PTYPE_FRG I40E_RX_PTYPE_FRAG +#define I40E_RX_PTYPE_INNER_PROT_TS I40E_RX_PTYPE_INNER_PROT_TIMESYNC + +/* Lookup table mapping the HW PTYPE to the bit field for decoding */ +struct i40e_rx_ptype_decoded i40evf_ptype_lookup[] = { + /* L2 Packet types */ + I40E_PTT_UNUSED_ENTRY(0), + I40E_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), + I40E_PTT(2, L2, NONE, NOF, NONE, NONE, NOF, TS, PAY2), + I40E_PTT(3, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), + I40E_PTT_UNUSED_ENTRY(4), + I40E_PTT_UNUSED_ENTRY(5), + I40E_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), + I40E_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), + I40E_PTT_UNUSED_ENTRY(8), + I40E_PTT_UNUSED_ENTRY(9), + I40E_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), + I40E_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), + I40E_PTT(12, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(13, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(14, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(15, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(16, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(17, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(18, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(19, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(20, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(21, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + + /* Non Tunneled IPv4 */ + I40E_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(25), + I40E_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4), + I40E_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4), + I40E_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4), + + /* IPv4 --> IPv4 */ + I40E_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3), + I40E_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3), + I40E_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(32), + I40E_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4), + I40E_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4), + I40E_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4), + + /* IPv4 --> IPv6 */ + I40E_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3), + I40E_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3), + I40E_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(39), + I40E_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4), + I40E_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4), 
+ I40E_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4), + + /* IPv4 --> GRE/NAT */ + I40E_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3), + + /* IPv4 --> GRE/NAT --> IPv4 */ + I40E_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3), + I40E_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3), + I40E_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(47), + I40E_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4), + I40E_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4), + I40E_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4), + + /* IPv4 --> GRE/NAT --> IPv6 */ + I40E_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3), + I40E_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3), + I40E_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(54), + I40E_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4), + I40E_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4), + I40E_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4), + + /* IPv4 --> GRE/NAT --> MAC */ + I40E_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3), + + /* IPv4 --> GRE/NAT --> MAC --> IPv4 */ + I40E_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3), + I40E_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3), + I40E_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(62), + I40E_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4), + I40E_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4), + I40E_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4), + + /* IPv4 --> GRE/NAT -> MAC --> IPv6 */ + I40E_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3), + I40E_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3), + I40E_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(69), + I40E_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4), + I40E_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4), + I40E_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4), + + /* IPv4 --> GRE/NAT --> MAC/VLAN */ + I40E_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3), + + /* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */ + I40E_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3), + I40E_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3), + I40E_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(77), + I40E_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4), + I40E_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4), + I40E_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4), + + /* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */ + I40E_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3), + I40E_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3), + I40E_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(84), + I40E_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4), + I40E_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4), + I40E_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4), + + /* Non Tunneled IPv6 */ + I40E_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY3), + I40E_PTT_UNUSED_ENTRY(91), + I40E_PTT(92, IP, IPV6, NOF, 
NONE, NONE, NOF, TCP, PAY4), + I40E_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4), + I40E_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4), + + /* IPv6 --> IPv4 */ + I40E_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3), + I40E_PTT(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3), + I40E_PTT(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(98), + I40E_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4), + I40E_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4), + I40E_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4), + + /* IPv6 --> IPv6 */ + I40E_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3), + I40E_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3), + I40E_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(105), + I40E_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4), + I40E_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4), + I40E_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4), + + /* IPv6 --> GRE/NAT */ + I40E_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3), + + /* IPv6 --> GRE/NAT -> IPv4 */ + I40E_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3), + I40E_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3), + I40E_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(113), + I40E_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4), + I40E_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4), + I40E_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4), + + /* IPv6 --> GRE/NAT -> IPv6 */ + I40E_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3), + I40E_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3), + I40E_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(120), + I40E_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4), + I40E_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4), + I40E_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4), + + /* IPv6 --> GRE/NAT -> MAC */ + I40E_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3), + + /* IPv6 --> GRE/NAT -> MAC -> IPv4 */ + I40E_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3), + I40E_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3), + I40E_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(128), + I40E_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4), + I40E_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4), + I40E_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4), + + /* IPv6 --> GRE/NAT -> MAC -> IPv6 */ + I40E_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3), + I40E_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3), + I40E_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(135), + I40E_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4), + I40E_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4), + I40E_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4), + + /* IPv6 --> GRE/NAT -> MAC/VLAN */ + I40E_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3), + + /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */ + I40E_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3), + I40E_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3), + I40E_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(143), + I40E_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, 
IPV4, NOF, TCP, PAY4), + I40E_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4), + I40E_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4), + + /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */ + I40E_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3), + I40E_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3), + I40E_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(150), + I40E_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4), + I40E_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4), + I40E_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4), + + /* unused entries */ + I40E_PTT_UNUSED_ENTRY(154), + I40E_PTT_UNUSED_ENTRY(155), + I40E_PTT_UNUSED_ENTRY(156), + I40E_PTT_UNUSED_ENTRY(157), + I40E_PTT_UNUSED_ENTRY(158), + I40E_PTT_UNUSED_ENTRY(159), + + I40E_PTT_UNUSED_ENTRY(160), + I40E_PTT_UNUSED_ENTRY(161), + I40E_PTT_UNUSED_ENTRY(162), + I40E_PTT_UNUSED_ENTRY(163), + I40E_PTT_UNUSED_ENTRY(164), + I40E_PTT_UNUSED_ENTRY(165), + I40E_PTT_UNUSED_ENTRY(166), + I40E_PTT_UNUSED_ENTRY(167), + I40E_PTT_UNUSED_ENTRY(168), + I40E_PTT_UNUSED_ENTRY(169), + + I40E_PTT_UNUSED_ENTRY(170), + I40E_PTT_UNUSED_ENTRY(171), + I40E_PTT_UNUSED_ENTRY(172), + I40E_PTT_UNUSED_ENTRY(173), + I40E_PTT_UNUSED_ENTRY(174), + I40E_PTT_UNUSED_ENTRY(175), + I40E_PTT_UNUSED_ENTRY(176), + I40E_PTT_UNUSED_ENTRY(177), + I40E_PTT_UNUSED_ENTRY(178), + I40E_PTT_UNUSED_ENTRY(179), + + I40E_PTT_UNUSED_ENTRY(180), + I40E_PTT_UNUSED_ENTRY(181), + I40E_PTT_UNUSED_ENTRY(182), + I40E_PTT_UNUSED_ENTRY(183), + I40E_PTT_UNUSED_ENTRY(184), + I40E_PTT_UNUSED_ENTRY(185), + I40E_PTT_UNUSED_ENTRY(186), + I40E_PTT_UNUSED_ENTRY(187), + I40E_PTT_UNUSED_ENTRY(188), + I40E_PTT_UNUSED_ENTRY(189), + + I40E_PTT_UNUSED_ENTRY(190), + I40E_PTT_UNUSED_ENTRY(191), + I40E_PTT_UNUSED_ENTRY(192), + I40E_PTT_UNUSED_ENTRY(193), + I40E_PTT_UNUSED_ENTRY(194), + I40E_PTT_UNUSED_ENTRY(195), + I40E_PTT_UNUSED_ENTRY(196), + I40E_PTT_UNUSED_ENTRY(197), + I40E_PTT_UNUSED_ENTRY(198), + I40E_PTT_UNUSED_ENTRY(199), + + I40E_PTT_UNUSED_ENTRY(200), + I40E_PTT_UNUSED_ENTRY(201), + I40E_PTT_UNUSED_ENTRY(202), + I40E_PTT_UNUSED_ENTRY(203), + I40E_PTT_UNUSED_ENTRY(204), + I40E_PTT_UNUSED_ENTRY(205), + I40E_PTT_UNUSED_ENTRY(206), + I40E_PTT_UNUSED_ENTRY(207), + I40E_PTT_UNUSED_ENTRY(208), + I40E_PTT_UNUSED_ENTRY(209), + + I40E_PTT_UNUSED_ENTRY(210), + I40E_PTT_UNUSED_ENTRY(211), + I40E_PTT_UNUSED_ENTRY(212), + I40E_PTT_UNUSED_ENTRY(213), + I40E_PTT_UNUSED_ENTRY(214), + I40E_PTT_UNUSED_ENTRY(215), + I40E_PTT_UNUSED_ENTRY(216), + I40E_PTT_UNUSED_ENTRY(217), + I40E_PTT_UNUSED_ENTRY(218), + I40E_PTT_UNUSED_ENTRY(219), + + I40E_PTT_UNUSED_ENTRY(220), + I40E_PTT_UNUSED_ENTRY(221), + I40E_PTT_UNUSED_ENTRY(222), + I40E_PTT_UNUSED_ENTRY(223), + I40E_PTT_UNUSED_ENTRY(224), + I40E_PTT_UNUSED_ENTRY(225), + I40E_PTT_UNUSED_ENTRY(226), + I40E_PTT_UNUSED_ENTRY(227), + I40E_PTT_UNUSED_ENTRY(228), + I40E_PTT_UNUSED_ENTRY(229), + + I40E_PTT_UNUSED_ENTRY(230), + I40E_PTT_UNUSED_ENTRY(231), + I40E_PTT_UNUSED_ENTRY(232), + I40E_PTT_UNUSED_ENTRY(233), + I40E_PTT_UNUSED_ENTRY(234), + I40E_PTT_UNUSED_ENTRY(235), + I40E_PTT_UNUSED_ENTRY(236), + I40E_PTT_UNUSED_ENTRY(237), + I40E_PTT_UNUSED_ENTRY(238), + I40E_PTT_UNUSED_ENTRY(239), + + I40E_PTT_UNUSED_ENTRY(240), + I40E_PTT_UNUSED_ENTRY(241), + I40E_PTT_UNUSED_ENTRY(242), + I40E_PTT_UNUSED_ENTRY(243), + I40E_PTT_UNUSED_ENTRY(244), + I40E_PTT_UNUSED_ENTRY(245), + I40E_PTT_UNUSED_ENTRY(246), + 
I40E_PTT_UNUSED_ENTRY(247), + I40E_PTT_UNUSED_ENTRY(248), + I40E_PTT_UNUSED_ENTRY(249), + + I40E_PTT_UNUSED_ENTRY(250), + I40E_PTT_UNUSED_ENTRY(251), + I40E_PTT_UNUSED_ENTRY(252), + I40E_PTT_UNUSED_ENTRY(253), + I40E_PTT_UNUSED_ENTRY(254), + I40E_PTT_UNUSED_ENTRY(255) +}; + + /** * i40e_aq_send_msg_to_pf * @hw: pointer to the hardware structure @@ -199,8 +565,7 @@ i40e_status i40e_aq_send_msg_to_pf(struct i40e_hw *hw, details.async = true; cmd_details = &details; } - status = i40evf_asq_send_command(hw, (struct i40e_aq_desc *)&desc, msg, - msglen, cmd_details); + status = i40evf_asq_send_command(hw, &desc, msg, msglen, cmd_details); return status; } diff --git a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h index 7841573a58c9..97ab8c2b76f8 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h @@ -63,6 +63,13 @@ i40e_status i40evf_aq_queue_shutdown(struct i40e_hw *hw, i40e_status i40e_set_mac_type(struct i40e_hw *hw); +extern struct i40e_rx_ptype_decoded i40evf_ptype_lookup[]; + +static inline struct i40e_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype) +{ + return i40evf_ptype_lookup[ptype]; +} + /* prototype for functions used for SW locks */ /* i40e_common for VF drivers*/ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index ffdb01d853db..53be5f44d015 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver - * Copyright(c) 2013 Intel Corporation. + * Copyright(c) 2013 - 2014 Intel Corporation. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -24,6 +24,7 @@ #include <linux/prefetch.h> #include "i40evf.h" +#include "i40e_prototype.h" static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size, u32 td_tag) @@ -169,6 +170,20 @@ static bool i40e_check_tx_hang(struct i40e_ring *tx_ring) } /** + * i40e_get_head - Retrieve head from head writeback + * @tx_ring: tx ring to fetch head of + * + * Returns value of Tx ring head based on value stored + * in head write-back location + **/ +static inline u32 i40e_get_head(struct i40e_ring *tx_ring) +{ + void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count; + + return le32_to_cpu(*(volatile __le32 *)head); +} + +/** * i40e_clean_tx_irq - Reclaim resources after transmit completes * @tx_ring: tx ring to clean * @budget: how many cleans we're allowed @@ -179,6 +194,7 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget) { u16 i = tx_ring->next_to_clean; struct i40e_tx_buffer *tx_buf; + struct i40e_tx_desc *tx_head; struct i40e_tx_desc *tx_desc; unsigned int total_packets = 0; unsigned int total_bytes = 0; @@ -187,6 +203,8 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget) tx_desc = I40E_TX_DESC(tx_ring, i); i -= tx_ring->count; + tx_head = I40E_TX_DESC(tx_ring, i40e_get_head(tx_ring)); + do { struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch; @@ -197,9 +215,8 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget) /* prevent any other reads prior to eop_desc */ read_barrier_depends(); - /* if the descriptor isn't done, no work yet to do */ - if (!(eop_desc->cmd_type_offset_bsz & - cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE))) + /* we have caught up to head, no work left to do */ + if (tx_head == tx_desc) break; /* clear next_to_watch to prevent false hangs */ @@ -431,6 +448,10 @@ int i40evf_setup_tx_descriptors(struct i40e_ring *tx_ring) /* round up to nearest 4K */ tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc); + /* add u32 for head writeback, align after this takes care of + * guaranteeing this is at least one cache line in size + */ + tx_ring->size += sizeof(u32); tx_ring->size = ALIGN(tx_ring->size, 4096); tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, &tx_ring->dma, GFP_KERNEL); @@ -722,7 +743,7 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi, rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT))) return; - /* likely incorrect csum if alternate IP extention headers found */ + /* likely incorrect csum if alternate IP extension headers found */ if (rx_status & (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT)) return; @@ -786,6 +807,29 @@ static inline u32 i40e_rx_hash(struct i40e_ring *ring, } /** + * i40e_ptype_to_hash - get a hash type + * @ptype: the ptype value from the descriptor + * + * Returns a hash type to be used by skb_set_hash + **/ +static inline enum pkt_hash_types i40e_ptype_to_hash(u8 ptype) +{ + struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype); + + if (!decoded.known) + return PKT_HASH_TYPE_NONE; + + if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP && + decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4) + return PKT_HASH_TYPE_L4; + else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP && + decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3) + return PKT_HASH_TYPE_L3; + else + return PKT_HASH_TYPE_L2; +} + +/** * i40e_clean_rx_irq - Reclaim resources after receive completes * @rx_ring: rx 
ring to clean * @budget: how many cleans we're allowed @@ -802,13 +846,13 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) u16 i = rx_ring->next_to_clean; union i40e_rx_desc *rx_desc; u32 rx_error, rx_status; + u8 rx_ptype; u64 qword; - u16 rx_ptype; rx_desc = I40E_RX_DESC(rx_ring, i); qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); - rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) - >> I40E_RXD_QW1_STATUS_SHIFT; + rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >> + I40E_RXD_QW1_STATUS_SHIFT; while (rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)) { union i40e_rx_desc *next_rxd; @@ -912,7 +956,8 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) goto next_desc; } - skb->rxhash = i40e_rx_hash(rx_ring, rx_desc); + skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc), + i40e_ptype_to_hash(rx_ptype)); /* probably a little skewed due to removing CRC */ total_rx_bytes += skb->len; total_rx_packets++; @@ -1241,7 +1286,8 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring, struct i40e_tx_context_desc *context_desc; int i = tx_ring->next_to_use; - if (!cd_type_cmd_tso_mss && !cd_tunneling && !cd_l2tag2) + if ((cd_type_cmd_tso_mss == I40E_TX_DESC_DTYPE_CONTEXT) && + !cd_tunneling && !cd_l2tag2) return; /* grab the next descriptor */ @@ -1352,9 +1398,23 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, tx_bi = &tx_ring->tx_bi[i]; } - tx_desc->cmd_type_offset_bsz = - build_ctob(td_cmd, td_offset, size, td_tag) | - cpu_to_le64((u64)I40E_TXD_CMD << I40E_TXD_QW1_CMD_SHIFT); + /* Place RS bit on last descriptor of any packet that spans across the + * 4th descriptor (WB_STRIDE aka 0x3) in a 64B cacheline. + */ +#define WB_STRIDE 0x3 + if (((i & WB_STRIDE) != WB_STRIDE) && + (first <= &tx_ring->tx_bi[i]) && + (first >= &tx_ring->tx_bi[i & ~WB_STRIDE])) { + tx_desc->cmd_type_offset_bsz = + build_ctob(td_cmd, td_offset, size, td_tag) | + cpu_to_le64((u64)I40E_TX_DESC_CMD_EOP << + I40E_TXD_QW1_CMD_SHIFT); + } else { + tx_desc->cmd_type_offset_bsz = + build_ctob(td_cmd, td_offset, size, td_tag) | + cpu_to_le64((u64)I40E_TXD_CMD << + I40E_TXD_QW1_CMD_SHIFT); + } netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index), @@ -1457,7 +1517,7 @@ static int i40e_xmit_descriptor_count(struct sk_buff *skb, /* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD, * + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD, - * + 2 desc gap to keep tail from touching head, + * + 4 desc gap to avoid the cache line where head is, * + 1 desc for context descriptor, * otherwise try next time */ @@ -1468,7 +1528,7 @@ static int i40e_xmit_descriptor_count(struct sk_buff *skb, count += skb_shinfo(skb)->nr_frags; #endif count += TXD_USE_COUNT(skb_headlen(skb)); - if (i40e_maybe_stop_tx(tx_ring, count + 3)) { + if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) { tx_ring->tx_stats.tx_busy++; return 0; } diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h index 3bffac06592f..4673b3381edd 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_type.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver - * Copyright(c) 2013 Intel Corporation. + * Copyright(c) 2013 - 2014 Intel Corporation. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -64,8 +64,6 @@ struct i40e_hw; typedef void (*I40E_ADMINQ_CALLBACK)(struct i40e_hw *, struct i40e_aq_desc *); -#define ETH_ALEN 6 - /* Data type manipulation macros. */ #define I40E_DESC_UNUSED(R) \ @@ -90,6 +88,7 @@ enum i40e_debug_mask { I40E_DEBUG_FLOW = 0x00000200, I40E_DEBUG_DCB = 0x00000400, I40E_DEBUG_DIAG = 0x00000800, + I40E_DEBUG_FD = 0x00001000, I40E_DEBUG_AQ_MESSAGE = 0x01000000, I40E_DEBUG_AQ_DESCRIPTOR = 0x02000000, @@ -466,6 +465,10 @@ union i40e_32byte_rx_desc { union { __le32 rss; /* RSS Hash */ __le32 fcoe_param; /* FCoE DDP Context id */ + /* Flow director filter id in case of + * Programming status desc WB + */ + __le32 fd_id; } hi_dword; } qword0; struct { @@ -706,7 +709,7 @@ enum i40e_rx_prog_status_desc_prog_id_masks { enum i40e_rx_prog_status_desc_error_bits { /* Note: These are predefined bit offsets */ I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT = 0, - I40E_RX_PROG_STATUS_DESC_NO_FD_QUOTA_SHIFT = 1, + I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT = 1, I40E_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT = 2, I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT = 3 }; @@ -1018,6 +1021,11 @@ struct i40e_hw_port_stats { u64 tx_size_big; /* ptc9522 */ u64 mac_short_packet_dropped; /* mspdc */ u64 checksum_error; /* xec */ + /* EEE LPI */ + bool tx_lpi_status; + bool rx_lpi_status; + u64 tx_lpi_count; /* etlpic */ + u64 rx_lpi_count; /* erlpic */ }; /* Checksum and Shadow RAM pointers */ diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h index ff6529b288a1..807807d62387 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf.h +++ b/drivers/net/ethernet/intel/i40evf/i40evf.h @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver - * Copyright(c) 2013 Intel Corporation. + * Copyright(c) 2013 - 2014 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -38,8 +38,6 @@ #include <linux/ipv6.h> #include <net/ip6_checksum.h> #include <net/udp.h> -#include <linux/sctp.h> - #include "i40e_type.h" #include "i40e_virtchnl.h" @@ -164,15 +162,14 @@ struct i40evf_vlan_filter { /* Driver state. The order of these is important! */ enum i40evf_state_t { __I40EVF_STARTUP, /* driver loaded, probe complete */ - __I40EVF_FAILED, /* PF communication failed. Fatal. 
*/
 	__I40EVF_REMOVE,		/* driver is being unloaded */
 	__I40EVF_INIT_VERSION_CHECK,	/* aq msg sent, awaiting reply */
 	__I40EVF_INIT_GET_RESOURCES,	/* aq msg sent, awaiting reply */
 	__I40EVF_INIT_SW,		/* got resources, setting up structs */
+	__I40EVF_RESETTING,		/* in reset */
 	/* Below here, watchdog is running */
 	__I40EVF_DOWN,			/* ready, can be opened */
 	__I40EVF_TESTING,		/* in ethtool self-test */
-	__I40EVF_RESETTING,		/* in reset */
 	__I40EVF_RUNNING,		/* opened, working */
 };

@@ -185,47 +182,25 @@ enum i40evf_critical_section_t {
 /* board specific private data structure */
 struct i40evf_adapter {
 	struct timer_list watchdog_timer;
-	struct vlan_group *vlgrp;
 	struct work_struct reset_task;
 	struct work_struct adminq_task;
 	struct delayed_work init_task;
 	struct i40e_q_vector *q_vector[MAX_MSIX_Q_VECTORS];
 	struct list_head vlan_filter_list;
-	char name[MAX_MSIX_COUNT][IFNAMSIZ + 9];
-
-	/* Interrupt Throttle Rate */
-	u32 itr_setting;
-	u16 eitr_low;
-	u16 eitr_high;
+	char misc_vector_name[IFNAMSIZ + 9];

 	/* TX */
 	struct i40e_ring *tx_rings[I40E_MAX_VSI_QP];
-	u64 restart_queue;
-	u64 hw_csum_tx_good;
-	u64 lsc_int;
-	u64 hw_tso_ctxt;
-	u64 hw_tso6_ctxt;
 	u32 tx_timeout_count;
 	struct list_head mac_filter_list;
-#ifdef DEBUG
-	bool detect_tx_hung;
-#endif /* DEBUG */

 	/* RX */
 	struct i40e_ring *rx_rings[I40E_MAX_VSI_QP];
-	int txd_count;
-	int rxd_count;
 	u64 hw_csum_rx_error;
-	u64 hw_rx_no_dma_resources;
-	u64 hw_csum_rx_good;
-	u64 non_eop_descs;
 	int num_msix_vectors;
 	struct msix_entry *msix_entries;
-	u64 rx_hdr_split;
-
-	u32 init_state;
-	volatile unsigned long flags;
+	u32 flags;
 #define I40EVF_FLAG_RX_CSUM_ENABLED		(u32)(1)
 #define I40EVF_FLAG_RX_1BUF_CAPABLE		(u32)(1 << 1)
 #define I40EVF_FLAG_RX_PS_CAPABLE		(u32)(1 << 2)
@@ -234,6 +209,9 @@ struct i40evf_adapter {
 #define I40EVF_FLAG_IMIR_ENABLED		(u32)(1 << 5)
 #define I40EVF_FLAG_MQ_CAPABLE			(u32)(1 << 6)
 #define I40EVF_FLAG_NEED_LINK_UPDATE		(u32)(1 << 7)
+#define I40EVF_FLAG_PF_COMMS_FAILED		(u32)(1 << 8)
+#define I40EVF_FLAG_RESET_PENDING		(u32)(1 << 9)
+#define I40EVF_FLAG_RESET_NEEDED		(u32)(1 << 10)
 /* duplicates for common code */
 #define I40E_FLAG_FDIR_ATR_ENABLED		 0
 #define I40E_FLAG_DCB_ENABLED			 0
@@ -251,21 +229,19 @@ struct i40evf_adapter {
 #define I40EVF_FLAG_AQ_CONFIGURE_QUEUES		(u32)(1 << 6)
 #define I40EVF_FLAG_AQ_MAP_VECTORS		(u32)(1 << 7)
 #define I40EVF_FLAG_AQ_HANDLE_RESET		(u32)(1 << 8)
+
 	/* OS defined structs */
 	struct net_device *netdev;
 	struct pci_dev *pdev;
 	struct net_device_stats net_stats;

-	/* structs defined in i40e_vf.h */
-	struct i40e_hw hw;
+	struct i40e_hw hw; /* defined in i40e_type.h */

 	enum i40evf_state_t state;
 	volatile unsigned long crit_section;
-	u64 tx_busy;

 	struct work_struct watchdog_task;
 	bool netdev_registered;
-	bool dev_closed;
 	bool link_up;
 	enum i40e_virtchnl_ops current_op;
 	struct i40e_virtchnl_vf_resource *vf_res; /* incl.
all VSIs */ @@ -276,11 +252,6 @@ struct i40evf_adapter { u32 aq_wait_count; }; -struct i40evf_info { - enum i40e_mac_type mac; - unsigned int flags; -}; - /* needed by i40evf_ethtool.c */ extern char i40evf_driver_name[]; @@ -315,6 +286,7 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter); void i40evf_del_vlans(struct i40evf_adapter *adapter); void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags); void i40evf_request_stats(struct i40evf_adapter *adapter); +void i40evf_request_reset(struct i40evf_adapter *adapter); void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, enum i40e_virtchnl_ops v_opcode, i40e_status v_retval, u8 *msg, u16 msglen); diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c index b0b1f4bf5ac0..8b0db1ce179c 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver - * Copyright(c) 2013 Intel Corporation. + * Copyright(c) 2013 - 2014 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -241,6 +241,7 @@ static int i40evf_set_ringparam(struct net_device *netdev, { struct i40evf_adapter *adapter = netdev_priv(netdev); u32 new_rx_count, new_tx_count; + int i; if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) return -EINVAL; @@ -256,12 +257,14 @@ static int i40evf_set_ringparam(struct net_device *netdev, new_rx_count = ALIGN(new_rx_count, I40EVF_REQ_DESCRIPTOR_MULTIPLE); /* if nothing to do return success */ - if ((new_tx_count == adapter->txd_count) && - (new_rx_count == adapter->rxd_count)) + if ((new_tx_count == adapter->tx_rings[0]->count) && + (new_rx_count == adapter->rx_rings[0]->count)) return 0; - adapter->txd_count = new_tx_count; - adapter->rxd_count = new_rx_count; + for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) { + adapter->tx_rings[0]->count = new_tx_count; + adapter->rx_rings[0]->count = new_rx_count; + } if (netif_running(netdev)) i40evf_reinit_locked(adapter); diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index f5caf4419243..e35e66ffa782 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver - * Copyright(c) 2013 Intel Corporation. + * Copyright(c) 2013 - 2014 Intel Corporation. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -31,10 +31,10 @@ char i40evf_driver_name[] = "i40evf"; static const char i40evf_driver_string[] = "Intel(R) XL710 X710 Virtual Function Network Driver"; -#define DRV_VERSION "0.9.11" +#define DRV_VERSION "0.9.16" const char i40evf_driver_version[] = DRV_VERSION; static const char i40evf_copyright[] = - "Copyright (c) 2013 Intel Corporation."; + "Copyright (c) 2013 - 2014 Intel Corporation."; /* i40evf_pci_tbl - PCI Device ID Table * @@ -167,9 +167,11 @@ static void i40evf_tx_timeout(struct net_device *netdev) struct i40evf_adapter *adapter = netdev_priv(netdev); adapter->tx_timeout_count++; - - /* Do the reset outside of interrupt context */ - schedule_work(&adapter->reset_task); + dev_info(&adapter->pdev->dev, "TX timeout detected.\n"); + if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING)) { + adapter->flags |= I40EVF_FLAG_RESET_NEEDED; + schedule_work(&adapter->reset_task); + } } /** @@ -211,6 +213,9 @@ static void i40evf_irq_disable(struct i40evf_adapter *adapter) int i; struct i40e_hw *hw = &adapter->hw; + if (!adapter->msix_entries) + return; + for (i = 1; i < adapter->num_msix_vectors; i++) { wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1), 0); synchronize_irq(adapter->msix_entries[i].vector); @@ -511,12 +516,14 @@ static int i40evf_request_misc_irq(struct i40evf_adapter *adapter) struct net_device *netdev = adapter->netdev; int err; - sprintf(adapter->name[0], "i40evf:mbx"); + sprintf(adapter->misc_vector_name, "i40evf:mbx"); err = request_irq(adapter->msix_entries[0].vector, - &i40evf_msix_aq, 0, adapter->name[0], netdev); + &i40evf_msix_aq, 0, + adapter->misc_vector_name, netdev); if (err) { dev_err(&adapter->pdev->dev, - "request_irq for msix_aq failed: %d\n", err); + "request_irq for %s failed: %d\n", + adapter->misc_vector_name, err); free_irq(adapter->msix_entries[0].vector, netdev); } return err; @@ -963,16 +970,23 @@ void i40evf_down(struct i40evf_adapter *adapter) struct net_device *netdev = adapter->netdev; struct i40evf_mac_filter *f; - /* remove all MAC filters from the VSI */ + /* remove all MAC filters */ list_for_each_entry(f, &adapter->mac_filter_list, list) { f->remove = true; } - adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER; - /* disable receives */ - adapter->aq_required |= I40EVF_FLAG_AQ_DISABLE_QUEUES; - mod_timer_pending(&adapter->watchdog_timer, jiffies + 1); - msleep(20); - + /* remove all VLAN filters */ + list_for_each_entry(f, &adapter->vlan_filter_list, list) { + f->remove = true; + } + if (!(adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) && + adapter->state != __I40EVF_RESETTING) { + adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER; + adapter->aq_required |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER; + /* disable receives */ + adapter->aq_required |= I40EVF_FLAG_AQ_DISABLE_QUEUES; + mod_timer_pending(&adapter->watchdog_timer, jiffies + 1); + msleep(20); + } netif_tx_disable(netdev); netif_tx_stop_all_queues(netdev); @@ -1124,8 +1138,8 @@ static int i40evf_set_interrupt_capability(struct i40evf_adapter *adapter) * than CPU's. So let's be conservative and only ask for * (roughly) twice the number of vectors as there are CPU's. 
*/ - v_budget = min(pairs, (int)(num_online_cpus() * 2)) + NONQ_VECS; - v_budget = min(v_budget, (int)adapter->vf_res->max_vectors + 1); + v_budget = min_t(int, pairs, (int)(num_online_cpus() * 2)) + NONQ_VECS; + v_budget = min_t(int, v_budget, (int)adapter->vf_res->max_vectors); /* A failure in MSI-X entry allocation isn't fatal, but it does * mean we disable MSI-X capabilities of the adapter. @@ -1291,19 +1305,47 @@ static void i40evf_watchdog_task(struct work_struct *work) watchdog_task); struct i40e_hw *hw = &adapter->hw; - if (adapter->state < __I40EVF_DOWN) + if (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section)) + goto restart_watchdog; + + if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) { + dev_info(&adapter->pdev->dev, "Checking for redemption\n"); + if ((rd32(hw, I40E_VFGEN_RSTAT) & 0x3) == I40E_VFR_VFACTIVE) { + /* A chance for redemption! */ + dev_err(&adapter->pdev->dev, "Hardware came out of reset. Attempting reinit.\n"); + adapter->state = __I40EVF_STARTUP; + adapter->flags &= ~I40EVF_FLAG_PF_COMMS_FAILED; + schedule_delayed_work(&adapter->init_task, 10); + clear_bit(__I40EVF_IN_CRITICAL_TASK, + &adapter->crit_section); + /* Don't reschedule the watchdog, since we've restarted + * the init task. When init_task contacts the PF and + * gets everything set up again, it'll restart the + * watchdog for us. Down, boy. Sit. Stay. Woof. + */ + return; + } + adapter->aq_pending = 0; + adapter->aq_required = 0; + adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN; goto watchdog_done; + } - if (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section)) + if ((adapter->state < __I40EVF_DOWN) || + (adapter->flags & I40EVF_FLAG_RESET_PENDING)) goto watchdog_done; - /* check for unannounced reset */ - if ((adapter->state != __I40EVF_RESETTING) && + /* check for reset */ + if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING) && (rd32(hw, I40E_VFGEN_RSTAT) & 0x3) != I40E_VFR_VFACTIVE) { adapter->state = __I40EVF_RESETTING; + adapter->flags |= I40EVF_FLAG_RESET_PENDING; + dev_err(&adapter->pdev->dev, "Hardware reset detected.\n"); + dev_info(&adapter->pdev->dev, "Scheduling reset task\n"); schedule_work(&adapter->reset_task); - dev_info(&adapter->pdev->dev, "%s: hardware reset detected\n", - __func__); + adapter->aq_pending = 0; + adapter->aq_required = 0; + adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN; goto watchdog_done; } @@ -1358,16 +1400,25 @@ static void i40evf_watchdog_task(struct work_struct *work) i40evf_irq_enable(adapter, true); i40evf_fire_sw_int(adapter, 0xFF); + watchdog_done: + clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); +restart_watchdog: if (adapter->aq_required) mod_timer(&adapter->watchdog_timer, jiffies + msecs_to_jiffies(20)); else mod_timer(&adapter->watchdog_timer, jiffies + (HZ * 2)); - clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); schedule_work(&adapter->adminq_task); } +static int next_queue(struct i40evf_adapter *adapter, int j) +{ + j += 1; + + return j >= adapter->vsi_res->num_queue_pairs ? 0 : j; +} + /** * i40evf_configure_rss - Prepare for RSS if used * @adapter: board private structure @@ -1398,19 +1449,19 @@ static void i40evf_configure_rss(struct i40evf_adapter *adapter) wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32)); /* Populate the LUT with max no. 
of queues in round robin fashion */ - for (i = 0, j = 0; i < I40E_VFQF_HLUT_MAX_INDEX; i++, j++) { - if (j == adapter->vsi_res->num_queue_pairs) - j = 0; - /* lut = 4-byte sliding window of 4 lut entries */ - lut = (lut << 8) | (j & - ((0x1 << 8) - 1)); - /* On i = 3, we have 4 entries in lut; write to the register */ - if ((i & 3) == 3) - wr32(hw, I40E_VFQF_HLUT(i >> 2), lut); + j = adapter->vsi_res->num_queue_pairs; + for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) { + lut = next_queue(adapter, j); + lut |= next_queue(adapter, j) << 8; + lut |= next_queue(adapter, j) << 16; + lut |= next_queue(adapter, j) << 24; + wr32(hw, I40E_VFQF_HLUT(i), lut); } i40e_flush(hw); } +#define I40EVF_RESET_WAIT_MS 100 +#define I40EVF_RESET_WAIT_COUNT 200 /** * i40evf_reset_task - Call-back task to handle hardware reset * @work: pointer to work_struct @@ -1421,8 +1472,9 @@ static void i40evf_configure_rss(struct i40evf_adapter *adapter) **/ static void i40evf_reset_task(struct work_struct *work) { - struct i40evf_adapter *adapter = - container_of(work, struct i40evf_adapter, reset_task); + struct i40evf_adapter *adapter = container_of(work, + struct i40evf_adapter, + reset_task); struct i40e_hw *hw = &adapter->hw; int i = 0, err; uint32_t rstat_val; @@ -1431,21 +1483,61 @@ static void i40evf_reset_task(struct work_struct *work) &adapter->crit_section)) udelay(500); - /* wait until the reset is complete */ - for (i = 0; i < 20; i++) { + if (adapter->flags & I40EVF_FLAG_RESET_NEEDED) { + dev_info(&adapter->pdev->dev, "Requesting reset from PF\n"); + i40evf_request_reset(adapter); + } + + /* poll until we see the reset actually happen */ + for (i = 0; i < I40EVF_RESET_WAIT_COUNT; i++) { rstat_val = rd32(hw, I40E_VFGEN_RSTAT) & I40E_VFGEN_RSTAT_VFR_STATE_MASK; - if (rstat_val == I40E_VFR_COMPLETED) + if (rstat_val != I40E_VFR_VFACTIVE) { + dev_info(&adapter->pdev->dev, "Reset now occurring\n"); break; - else - mdelay(100); + } else { + msleep(I40EVF_RESET_WAIT_MS); + } + } + if (i == I40EVF_RESET_WAIT_COUNT) { + dev_err(&adapter->pdev->dev, "Reset was not detected\n"); + adapter->flags &= ~I40EVF_FLAG_RESET_PENDING; + goto continue_reset; /* act like the reset happened */ + } + + /* wait until the reset is complete and the PF is responding to us */ + for (i = 0; i < I40EVF_RESET_WAIT_COUNT; i++) { + rstat_val = rd32(hw, I40E_VFGEN_RSTAT) & + I40E_VFGEN_RSTAT_VFR_STATE_MASK; + if (rstat_val == I40E_VFR_VFACTIVE) { + dev_info(&adapter->pdev->dev, "Reset is complete. Reinitializing.\n"); + break; + } else { + msleep(I40EVF_RESET_WAIT_MS); + } } - if (i == 20) { + if (i == I40EVF_RESET_WAIT_COUNT) { /* reset never finished */ - dev_info(&adapter->pdev->dev, "%s: reset never finished: %x\n", - __func__, rstat_val); - /* carry on anyway */ + dev_err(&adapter->pdev->dev, "Reset never finished (%x). PF driver is dead, and so am I.\n", + rstat_val); + adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED; + + if (netif_running(adapter->netdev)) + i40evf_close(adapter->netdev); + + i40evf_free_misc_irq(adapter); + i40evf_reset_interrupt_capability(adapter); + i40evf_free_queues(adapter); + kfree(adapter->vf_res); + i40evf_shutdown_adminq(hw); + adapter->netdev->flags &= ~IFF_UP; + clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); + return; /* Do not attempt to reinit. It's dead, Jim. 
*/
 	}
+
+continue_reset:
+	adapter->flags &= ~I40EVF_FLAG_RESET_PENDING;
+
 	i40evf_down(adapter);
 	adapter->state = __I40EVF_RESETTING;
@@ -1505,6 +1597,9 @@ static void i40evf_adminq_task(struct work_struct *work)
 	i40e_status ret;
 	u16 pending;

+	if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED)
+		return;
+
 	event.msg_size = I40EVF_MAX_AQ_BUF_SIZE;
 	event.msg_buf = kzalloc(event.msg_size, GFP_KERNEL);
 	if (!event.msg_buf) {
@@ -1636,6 +1731,10 @@ static int i40evf_open(struct net_device *netdev)
 	struct i40evf_adapter *adapter = netdev_priv(netdev);
 	int err;

+	if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) {
+		dev_err(&adapter->pdev->dev, "Unable to open device due to PF driver failure.\n");
+		return -EIO;
+	}
 	if (adapter->state != __I40EVF_DOWN)
 		return -EBUSY;

@@ -1690,8 +1789,12 @@ static int i40evf_close(struct net_device *netdev)
 {
 	struct i40evf_adapter *adapter = netdev_priv(netdev);

+	if (adapter->state <= __I40EVF_DOWN)
+		return 0;
+
 	/* signal that we are down to the interrupt handler */
 	adapter->state = __I40EVF_DOWN;
+	set_bit(__I40E_DOWN, &adapter->vsi.state);

 	i40evf_down(adapter);

@@ -1842,16 +1945,18 @@ static void i40evf_init_task(struct work_struct *work)
 	switch (adapter->state) {
 	case __I40EVF_STARTUP:
 		/* driver loaded, probe complete */
+		adapter->flags &= ~I40EVF_FLAG_PF_COMMS_FAILED;
+		adapter->flags &= ~I40EVF_FLAG_RESET_PENDING;
 		err = i40e_set_mac_type(hw);
 		if (err) {
-			dev_info(&pdev->dev, "%s: set_mac_type failed: %d\n",
-				 __func__, err);
+			dev_err(&pdev->dev, "Failed to set MAC type (%d)\n",
+				err);
 			goto err;
 		}
 		err = i40evf_check_reset_complete(hw);
 		if (err) {
-			dev_info(&pdev->dev, "%s: device is still in reset (%d).\n",
-				 __func__, err);
+			dev_err(&pdev->dev, "Device is still in reset (%d)\n",
+				err);
 			goto err;
 		}
 		hw->aq.num_arq_entries = I40EVF_AQ_LEN;
@@ -1861,14 +1966,13 @@ static void i40evf_init_task(struct work_struct *work)

 		err = i40evf_init_adminq(hw);
 		if (err) {
-			dev_info(&pdev->dev, "%s: init_adminq failed: %d\n",
-				 __func__, err);
+			dev_err(&pdev->dev, "Failed to init Admin Queue (%d)\n",
+				err);
 			goto err;
 		}
 		err = i40evf_send_api_ver(adapter);
 		if (err) {
-			dev_info(&pdev->dev, "%s: unable to send to PF (%d)\n",
-				 __func__, err);
+			dev_err(&pdev->dev, "Unable to send to PF (%d)\n", err);
 			i40evf_shutdown_adminq(hw);
 			goto err;
 		}
@@ -1876,19 +1980,21 @@ static void i40evf_init_task(struct work_struct *work)
 			goto restart;
 		break;
 	case __I40EVF_INIT_VERSION_CHECK:
-		if (!i40evf_asq_done(hw))
+		if (!i40evf_asq_done(hw)) {
+			dev_err(&pdev->dev, "Admin queue command never completed.\n");
 			goto err;
+		}

 		/* aq msg sent, awaiting reply */
 		err = i40evf_verify_api_ver(adapter);
 		if (err) {
-			dev_err(&pdev->dev, "Unable to verify API version, error %d\n",
+			dev_err(&pdev->dev, "Unable to verify API version (%d)\n",
 				err);
 			goto err;
 		}
 		err = i40evf_send_vf_config_msg(adapter);
 		if (err) {
-			dev_err(&pdev->dev, "Unable send config request, error %d\n",
+			dev_err(&pdev->dev, "Unable to send config request (%d)\n",
 				err);
 			goto err;
 		}
@@ -1902,18 +2008,15 @@ static void i40evf_init_task(struct work_struct *work)
 				(I40E_MAX_VF_VSI *
 				 sizeof(struct i40e_virtchnl_vsi_resource));
 			adapter->vf_res = kzalloc(bufsz, GFP_KERNEL);
-			if (!adapter->vf_res) {
-				dev_err(&pdev->dev, "%s: unable to allocate memory\n",
-					__func__);
+			if (!adapter->vf_res)
 				goto err;
-			}
 		}
 		err = i40evf_get_vf_config(adapter);
 		if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK)
 			goto restart;
 		if (err) {
-			dev_info(&pdev->dev, "%s: unable to get VF config (%d)\n",
-				 __func__, err);
+			dev_err(&pdev->dev, "Unable to get VF config (%d)\n",
+
err); goto err_alloc; } adapter->state = __I40EVF_INIT_SW; @@ -1927,25 +2030,23 @@ static void i40evf_init_task(struct work_struct *work) adapter->vsi_res = &adapter->vf_res->vsi_res[i]; } if (!adapter->vsi_res) { - dev_info(&pdev->dev, "%s: no LAN VSI found\n", __func__); + dev_err(&pdev->dev, "No LAN VSI found\n"); goto err_alloc; } adapter->flags |= I40EVF_FLAG_RX_CSUM_ENABLED; - adapter->txd_count = I40EVF_DEFAULT_TXD; - adapter->rxd_count = I40EVF_DEFAULT_RXD; - netdev->netdev_ops = &i40evf_netdev_ops; i40evf_set_ethtool_ops(netdev); netdev->watchdog_timeo = 5 * HZ; - - netdev->features |= NETIF_F_SG | + netdev->features |= NETIF_F_HIGHDMA | + NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_SCTP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_TSO | NETIF_F_TSO6 | + NETIF_F_RXCSUM | NETIF_F_GRO; if (adapter->vf_res->vf_offload_flags @@ -1956,11 +2057,13 @@ static void i40evf_init_task(struct work_struct *work) NETIF_F_HW_VLAN_CTAG_FILTER; } - /* The HW MAC address was set and/or determined in sw_init */ + /* copy netdev features into list of user selectable features */ + netdev->hw_features |= netdev->features; + netdev->hw_features &= ~NETIF_F_RXCSUM; + if (!is_valid_ether_addr(adapter->hw.mac.addr)) { - dev_info(&pdev->dev, - "Invalid MAC address %pMAC, using random\n", - adapter->hw.mac.addr); + dev_info(&pdev->dev, "Invalid MAC address %pMAC, using random\n", + adapter->hw.mac.addr); random_ether_addr(adapter->hw.mac.addr); } memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len); @@ -1994,8 +2097,6 @@ static void i40evf_init_task(struct work_struct *work) netif_carrier_off(netdev); - strcpy(netdev->name, "eth%d"); - adapter->vsi.id = adapter->vsi_res->vsi_id; adapter->vsi.seid = adapter->vsi_res->vsi_id; /* dummy */ adapter->vsi.back = adapter; @@ -2005,9 +2106,11 @@ static void i40evf_init_task(struct work_struct *work) adapter->vsi.tx_itr_setting = I40E_ITR_DYNAMIC; adapter->vsi.netdev = adapter->netdev; - err = register_netdev(netdev); - if (err) - goto err_register; + if (!adapter->netdev_registered) { + err = register_netdev(netdev); + if (err) + goto err_register; + } adapter->netdev_registered = true; @@ -2031,7 +2134,6 @@ err_register: i40evf_free_misc_irq(adapter); err_sw_init: i40evf_reset_interrupt_capability(adapter); - adapter->state = __I40EVF_FAILED; err_alloc: kfree(adapter->vf_res); adapter->vf_res = NULL; @@ -2039,9 +2141,7 @@ err: /* Things went into the weeds, so try again later */ if (++adapter->aq_wait_count > I40EVF_AQ_MAX_ERR) { dev_err(&pdev->dev, "Failed to communicate with PF; giving up.\n"); - if (hw->aq.asq.count) - i40evf_shutdown_adminq(hw); /* ignore error */ - adapter->state = __I40EVF_FAILED; + adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED; return; /* do not reschedule */ } schedule_delayed_work(&adapter->init_task, HZ * 3); @@ -2084,26 +2184,20 @@ static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) struct net_device *netdev; struct i40evf_adapter *adapter = NULL; struct i40e_hw *hw = NULL; - int err, pci_using_dac; + int err; err = pci_enable_device(pdev); if (err) return err; - if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) { - pci_using_dac = true; - /* coherent mask for the same size will always succeed if - * dma_set_mask does - */ - dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); - } else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) { - pci_using_dac = false; - dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); - } else { - dev_err(&pdev->dev, "%s: DMA configuration failed: %d\n", - __func__, err); - err = -EIO; - 
goto err_dma; + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (err) { + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (err) { + dev_err(&pdev->dev, + "DMA configuration failed: 0x%x\n", err); + goto err_dma; + } } err = pci_request_regions(pdev, i40evf_driver_name); @@ -2128,8 +2222,6 @@ static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) pci_set_drvdata(pdev, netdev); adapter = netdev_priv(netdev); - if (pci_using_dac) - netdev->features |= NETIF_F_HIGHDMA; adapter->netdev = netdev; adapter->pdev = pdev; @@ -2271,6 +2363,7 @@ static void i40evf_remove(struct pci_dev *pdev) struct i40e_hw *hw = &adapter->hw; cancel_delayed_work_sync(&adapter->init_task); + cancel_work_sync(&adapter->reset_task); if (adapter->netdev_registered) { unregister_netdev(netdev); @@ -2278,17 +2371,15 @@ static void i40evf_remove(struct pci_dev *pdev) } adapter->state = __I40EVF_REMOVE; - if (adapter->num_msix_vectors) { + if (adapter->msix_entries) { i40evf_misc_irq_disable(adapter); - del_timer_sync(&adapter->watchdog_timer); - - flush_scheduled_work(); - i40evf_free_misc_irq(adapter); - i40evf_reset_interrupt_capability(adapter); } + del_timer_sync(&adapter->watchdog_timer); + flush_scheduled_work(); + if (hw->aq.asq.count) i40evf_shutdown_adminq(hw); diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c index e6978d79e62b..e294f012647d 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver - * Copyright(c) 2013 Intel Corporation. + * Copyright(c) 2013 - 2014 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -43,6 +43,9 @@ static int i40evf_send_pf_msg(struct i40evf_adapter *adapter, struct i40e_hw *hw = &adapter->hw; i40e_status err; + if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) + return 0; /* nothing to see here, move along */ + err = i40e_aq_send_msg_to_pf(hw, op, 0, msg, len, NULL); if (err) dev_err(&adapter->pdev->dev, "Unable to send opcode %d to PF, error %d, aq status %d\n", @@ -651,6 +654,18 @@ void i40evf_request_stats(struct i40evf_adapter *adapter) /* if the request failed, don't lock out others */ adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN; } +/** + * i40evf_request_reset + * @adapter: adapter structure + * + * Request that the PF reset this VF. No response is expected. 
+ **/ +void i40evf_request_reset(struct i40evf_adapter *adapter) +{ + /* Don't check CURRENT_OP - this is always higher priority */ + i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_RESET_VF, NULL, 0); + adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN; +} /** * i40evf_virtchnl_completion @@ -689,10 +704,12 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, } break; case I40E_VIRTCHNL_EVENT_RESET_IMPENDING: - adapter->state = __I40EVF_RESETTING; - schedule_work(&adapter->reset_task); - dev_info(&adapter->pdev->dev, - "%s: hardware reset pending\n", __func__); + dev_info(&adapter->pdev->dev, "PF reset warning received\n"); + if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING)) { + adapter->flags |= I40EVF_FLAG_RESET_PENDING; + dev_info(&adapter->pdev->dev, "Scheduling reset task\n"); + schedule_work(&adapter->reset_task); + } break; default: dev_err(&adapter->pdev->dev, diff --git a/drivers/net/ethernet/intel/igb/Makefile b/drivers/net/ethernet/intel/igb/Makefile index f19700e285bb..5bcb2de75933 100644 --- a/drivers/net/ethernet/intel/igb/Makefile +++ b/drivers/net/ethernet/intel/igb/Makefile @@ -1,7 +1,7 @@ ################################################################################ # # Intel 82575 PCI-Express Ethernet Linux driver -# Copyright(c) 1999 - 2013 Intel Corporation. +# Copyright(c) 1999 - 2014 Intel Corporation. # # This program is free software; you can redistribute it and/or modify it # under the terms and conditions of the GNU General Public License, @@ -13,8 +13,7 @@ # more details. # # You should have received a copy of the GNU General Public License along with -# this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. +# this program; if not, see <http://www.gnu.org/licenses/>. # # The full GNU General Public License is included in this distribution in # the file called "COPYING". diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c index 06df6928f44c..fa36fe12e775 100644 --- a/drivers/net/ethernet/intel/igb/e1000_82575.c +++ b/drivers/net/ethernet/intel/igb/e1000_82575.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel(R) Gigabit Ethernet Linux driver - Copyright(c) 2007-2013 Intel Corporation. + Copyright(c) 2007-2014 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -13,8 +13,7 @@ more details. You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + this program; if not, see <http://www.gnu.org/licenses/>. The full GNU General Public License is included in this distribution in the file called "COPYING". 
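[Editorial sketch] The i40evf reset paths changed above (tx_timeout, the watchdog, and the RESET_IMPENDING virtchnl event) all converge on the same gate-then-schedule pattern: test a reset flag before queueing the reset task, so a burst of reset indications collapses into a single reset cycle, and clear the flag only once the reset work actually finishes. A minimal sketch of that pattern, using hypothetical names (my_adapter, MY_RESET_PENDING, my_reset_task) rather than the driver's real structures:

#include <linux/kernel.h>	/* container_of() */
#include <linux/types.h>
#include <linux/workqueue.h>

struct my_adapter {
	u32 flags;
#define MY_RESET_PENDING	(u32)(1 << 0)
	struct work_struct reset_task;
};

/* Any context that detects (or wants) a reset calls this. */
static void my_notice_reset(struct my_adapter *ad)
{
	/* The flag gates scheduling: only the first caller queues the
	 * work item; later callers see the pending bit and back off.
	 */
	if (!(ad->flags & MY_RESET_PENDING)) {
		ad->flags |= MY_RESET_PENDING;
		schedule_work(&ad->reset_task);
	}
}

/* The work item clears the flag only after the reset completes,
 * mirroring the continue_reset path above.
 */
static void my_reset_task(struct work_struct *work)
{
	struct my_adapter *ad = container_of(work, struct my_adapter,
					     reset_task);

	/* ... poll hardware, rebuild rings, re-enable interrupts ... */
	ad->flags &= ~MY_RESET_PENDING;
}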
@@ -77,8 +76,6 @@ static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw); static const u16 e1000_82580_rxpbs_table[] = { 36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140 }; -#define E1000_82580_RXPBS_TABLE_SIZE \ - (sizeof(e1000_82580_rxpbs_table)/sizeof(u16)) /** * igb_sgmii_uses_mdio_82575 - Determine if I2C pins are for external MDIO @@ -2308,7 +2305,7 @@ u16 igb_rxpbs_adjust_82580(u32 data) { u16 ret_val = 0; - if (data < E1000_82580_RXPBS_TABLE_SIZE) + if (data < ARRAY_SIZE(e1000_82580_rxpbs_table)) ret_val = e1000_82580_rxpbs_table[data]; return ret_val; @@ -2714,13 +2711,14 @@ static const u8 e1000_emc_therm_limit[4] = { E1000_EMC_DIODE3_THERM_LIMIT }; +#ifdef CONFIG_IGB_HWMON /** * igb_get_thermal_sensor_data_generic - Gathers thermal sensor data * @hw: pointer to hardware structure * * Updates the temperatures in mac.thermal_sensor_data **/ -s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw) +static s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw) { s32 status = E1000_SUCCESS; u16 ets_offset; @@ -2774,7 +2772,7 @@ s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw) * Sets the thermal sensor thresholds according to the NVM map * and save off the threshold and location values into mac.thermal_sensor_data **/ -s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *hw) +static s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *hw) { s32 status = E1000_SUCCESS; u16 ets_offset; @@ -2836,6 +2834,7 @@ s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *hw) return status; } +#endif static struct e1000_mac_operations e1000_mac_ops_82575 = { .init_hw = igb_init_hw_82575, .check_for_link = igb_check_for_link_82575, diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.h b/drivers/net/ethernet/intel/igb/e1000_82575.h index 8c2437722aad..09d78be72416 100644 --- a/drivers/net/ethernet/intel/igb/e1000_82575.h +++ b/drivers/net/ethernet/intel/igb/e1000_82575.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel(R) Gigabit Ethernet Linux driver - Copyright(c) 2007-2013 Intel Corporation. + Copyright(c) 2007-2014 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -13,8 +13,7 @@ more details. You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + this program; if not, see <http://www.gnu.org/licenses/>. The full GNU General Public License is included in this distribution in the file called "COPYING". 
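[Editorial sketch] The rxpbs hunk above swaps the hand-maintained E1000_82580_RXPBS_TABLE_SIZE macro for ARRAY_SIZE(), which the compiler derives from the array definition itself. A small illustration of why this is safer; the table values are copied from the hunk, the function name is hypothetical:

#include <linux/kernel.h>	/* ARRAY_SIZE() */
#include <linux/types.h>

static const u16 rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140 };

static u16 rxpbs_lookup(u32 data)
{
	/* ARRAY_SIZE(rxpbs_table) tracks the initializer automatically,
	 * so adding or removing table entries can never leave a stale,
	 * separately defined size constant behind.
	 */
	if (data < ARRAY_SIZE(rxpbs_table))
		return rxpbs_table[data];

	return 0;
}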
@@ -231,6 +230,10 @@ struct e1000_adv_tx_context_desc {
 #define E1000_VMOLR_STRVLAN	0x40000000 /* Vlan stripping enable */
 #define E1000_VMOLR_STRCRC	0x80000000 /* CRC stripping enable */

+#define E1000_DVMOLR_HIDEVLAN	0x20000000 /* Hide vlan enable */
+#define E1000_DVMOLR_STRVLAN	0x40000000 /* Vlan stripping enable */
+#define E1000_DVMOLR_STRCRC	0x80000000 /* CRC stripping enable */
+
 #define E1000_VLVF_ARRAY_SIZE	32
 #define E1000_VLVF_VLANID_MASK	0x00000FFF
 #define E1000_VLVF_POOLSEL_SHIFT	12
@@ -266,8 +269,7 @@ u16 igb_rxpbs_adjust_82580(u32 data);
 s32 igb_read_emi_reg(struct e1000_hw *, u16 addr, u16 *data);
 s32 igb_set_eee_i350(struct e1000_hw *);
 s32 igb_set_eee_i354(struct e1000_hw *);
-s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *);
-s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw);
+s32 igb_get_eee_status_i354(struct e1000_hw *hw, bool *status);

 #define E1000_I2C_THERMAL_SENSOR_ADDR	0xF8
 #define E1000_EMC_INTERNAL_DATA	0x00
diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h index 0571b973be80..b05bf925ac72 100644 --- a/drivers/net/ethernet/intel/igb/e1000_defines.h +++ b/drivers/net/ethernet/intel/igb/e1000_defines.h @@ -1,7 +1,7 @@
 /*******************************************************************************

   Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2013 Intel Corporation.
+  Copyright(c) 2007-2014 Intel Corporation.

   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
   more details.

   You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  this program; if not, see <http://www.gnu.org/licenses/>.

   The full GNU General Public License is included in this distribution in
   the file called "COPYING".
@@ -44,7 +43,11 @@
 #define E1000_WUFC_BC	0x00000010 /* Broadcast Wakeup Enable */

 /* Extended Device Control */
+#define E1000_CTRL_EXT_SDP2_DATA 0x00000040 /* Value of SW Definable Pin 2 */
 #define E1000_CTRL_EXT_SDP3_DATA 0x00000080 /* Value of SW Definable Pin 3 */
+#define E1000_CTRL_EXT_SDP2_DIR	0x00000400 /* SDP2 Data direction */
+#define E1000_CTRL_EXT_SDP3_DIR	0x00000800 /* SDP3 Data direction */
+
 /* Physical Func Reset Done Indication */
 #define E1000_CTRL_EXT_PFRSTD	0x00004000
 #define E1000_CTRL_EXT_LINK_MODE_MASK	0x00C00000
@@ -191,7 +194,8 @@
 /* enable link status from external LINK_0 and LINK_1 pins */
 #define E1000_CTRL_SWDPIN0	0x00040000 /* SWDPIN 0 value */
 #define E1000_CTRL_SWDPIN1	0x00080000 /* SWDPIN 1 value */
-#define E1000_CTRL_SWDPIO0	0x00400000 /* SWDPIN 0 Input or output */
+#define E1000_CTRL_SDP0_DIR	0x00400000 /* SDP0 Data direction */
+#define E1000_CTRL_SDP1_DIR	0x00800000 /* SDP1 Data direction */
 #define E1000_CTRL_RST	0x04000000 /* Global reset */
 #define E1000_CTRL_RFCE	0x08000000 /* Receive Flow Control enable */
 #define E1000_CTRL_TFCE	0x10000000 /* Transmit flow control enable */
@@ -529,8 +533,67 @@

 #define E1000_TIMINCA_16NS_SHIFT 24

-#define E1000_TSICR_TXTS 0x00000002
-#define E1000_TSIM_TXTS 0x00000002
+/* Time Sync Interrupt Cause/Mask Register Bits */
+
+#define TSINTR_SYS_WRAP	(1 << 0) /* SYSTIM Wrap around. */
+#define TSINTR_TXTS	(1 << 1) /* Transmit Timestamp. */
+#define TSINTR_RXTS	(1 << 2) /* Receive Timestamp. */
+#define TSINTR_TT0	(1 << 3) /* Target Time 0 Trigger. */
+#define TSINTR_TT1	(1 << 4) /* Target Time 1 Trigger. */
+#define TSINTR_AUTT0	(1 << 5) /* Auxiliary Timestamp 0 Taken. */
+#define TSINTR_AUTT1	(1 << 6) /* Auxiliary Timestamp 1 Taken. */
+#define TSINTR_TADJ	(1 << 7) /* Time Adjust Done. */
+
+#define TSYNC_INTERRUPTS TSINTR_TXTS
+#define E1000_TSICR_TXTS TSINTR_TXTS
+
+/* TSAUXC Configuration Bits */
+#define TSAUXC_EN_TT0	(1 << 0)  /* Enable target time 0. */
+#define TSAUXC_EN_TT1	(1 << 1)  /* Enable target time 1. */
+#define TSAUXC_EN_CLK0	(1 << 2)  /* Enable Configurable Frequency Clock 0. */
+#define TSAUXC_SAMP_AUT0 (1 << 3) /* Latch SYSTIML/H into AUXSTMPL/0. */
+#define TSAUXC_ST0	(1 << 4)  /* Start Clock 0 Toggle on Target Time 0. */
+#define TSAUXC_EN_CLK1	(1 << 5)  /* Enable Configurable Frequency Clock 1. */
+#define TSAUXC_SAMP_AUT1 (1 << 6) /* Latch SYSTIML/H into AUXSTMPL/1. */
+#define TSAUXC_ST1	(1 << 7)  /* Start Clock 1 Toggle on Target Time 1. */
+#define TSAUXC_EN_TS0	(1 << 8)  /* Enable hardware timestamp 0. */
+#define TSAUXC_AUTT0	(1 << 9)  /* Auxiliary Timestamp Taken. */
+#define TSAUXC_EN_TS1	(1 << 10) /* Enable hardware timestamp 1. */
+#define TSAUXC_AUTT1	(1 << 11) /* Auxiliary Timestamp Taken. */
+#define TSAUXC_PLSG	(1 << 17) /* Generate a pulse. */
+#define TSAUXC_DISABLE	(1 << 31) /* Disable SYSTIM Count Operation. */
+
+/* SDP Configuration Bits */
+#define AUX0_SEL_SDP0	(0 << 0)  /* Assign SDP0 to auxiliary time stamp 0. */
+#define AUX0_SEL_SDP1	(1 << 0)  /* Assign SDP1 to auxiliary time stamp 0. */
+#define AUX0_SEL_SDP2	(2 << 0)  /* Assign SDP2 to auxiliary time stamp 0. */
+#define AUX0_SEL_SDP3	(3 << 0)  /* Assign SDP3 to auxiliary time stamp 0. */
+#define AUX0_TS_SDP_EN	(1 << 2)  /* Enable auxiliary time stamp trigger 0. */
+#define AUX1_SEL_SDP0	(0 << 3)  /* Assign SDP0 to auxiliary time stamp 1. */
+#define AUX1_SEL_SDP1	(1 << 3)  /* Assign SDP1 to auxiliary time stamp 1. */
+#define AUX1_SEL_SDP2	(2 << 3)  /* Assign SDP2 to auxiliary time stamp 1. */
+#define AUX1_SEL_SDP3	(3 << 3)  /* Assign SDP3 to auxiliary time stamp 1. */
+#define AUX1_TS_SDP_EN	(1 << 5)  /* Enable auxiliary time stamp trigger 1. */
+#define TS_SDP0_SEL_TT0	(0 << 6)  /* Target time 0 is output on SDP0. */
+#define TS_SDP0_SEL_TT1	(1 << 6)  /* Target time 1 is output on SDP0. */
+#define TS_SDP0_SEL_FC0	(2 << 6)  /* Freq clock 0 is output on SDP0. */
+#define TS_SDP0_SEL_FC1	(3 << 6)  /* Freq clock 1 is output on SDP0. */
+#define TS_SDP0_EN	(1 << 8)  /* SDP0 is assigned to Tsync. */
+#define TS_SDP1_SEL_TT0	(0 << 9)  /* Target time 0 is output on SDP1. */
+#define TS_SDP1_SEL_TT1	(1 << 9)  /* Target time 1 is output on SDP1. */
+#define TS_SDP1_SEL_FC0	(2 << 9)  /* Freq clock 0 is output on SDP1. */
+#define TS_SDP1_SEL_FC1	(3 << 9)  /* Freq clock 1 is output on SDP1. */
+#define TS_SDP1_EN	(1 << 11) /* SDP1 is assigned to Tsync. */
+#define TS_SDP2_SEL_TT0	(0 << 12) /* Target time 0 is output on SDP2. */
+#define TS_SDP2_SEL_TT1	(1 << 12) /* Target time 1 is output on SDP2. */
+#define TS_SDP2_SEL_FC0	(2 << 12) /* Freq clock 0 is output on SDP2. */
+#define TS_SDP2_SEL_FC1	(3 << 12) /* Freq clock 1 is output on SDP2. */
+#define TS_SDP2_EN	(1 << 14) /* SDP2 is assigned to Tsync. */
+#define TS_SDP3_SEL_TT0	(0 << 15) /* Target time 0 is output on SDP3. */
+#define TS_SDP3_SEL_TT1	(1 << 15) /* Target time 1 is output on SDP3. */
+#define TS_SDP3_SEL_FC0	(2 << 15) /* Freq clock 0 is output on SDP3. */
+#define TS_SDP3_SEL_FC1	(3 << 15) /* Freq clock 1 is output on SDP3. */
+#define TS_SDP3_EN	(1 << 17) /* SDP3 is assigned to Tsync.
*/ #define E1000_MDICNFG_EXT_MDIO 0x80000000 /* MDI ext/int destination */ #define E1000_MDICNFG_COM_MDIO 0x40000000 /* MDI shared w/ lan 0 */ diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h index ab99e2b582a8..10741d170f2d 100644 --- a/drivers/net/ethernet/intel/igb/e1000_hw.h +++ b/drivers/net/ethernet/intel/igb/e1000_hw.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel(R) Gigabit Ethernet Linux driver - Copyright(c) 2007-2013 Intel Corporation. + Copyright(c) 2007-2014 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -13,8 +13,7 @@ more details. You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + this program; if not, see <http://www.gnu.org/licenses/>. The full GNU General Public License is included in this distribution in the file called "COPYING". diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.c b/drivers/net/ethernet/intel/igb/e1000_i210.c index 0c0393316a3a..db963397cc27 100644 --- a/drivers/net/ethernet/intel/igb/e1000_i210.c +++ b/drivers/net/ethernet/intel/igb/e1000_i210.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel(R) Gigabit Ethernet Linux driver - Copyright(c) 2007-2013 Intel Corporation. + Copyright(c) 2007-2014 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -13,8 +13,7 @@ more details. You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + this program; if not, see <http://www.gnu.org/licenses/>. The full GNU General Public License is included in this distribution in the file called "COPYING". @@ -35,6 +34,8 @@ #include "e1000_hw.h" #include "e1000_i210.h" +static s32 igb_update_flash_i210(struct e1000_hw *hw); + /** * igb_get_hw_semaphore_i210 - Acquire hardware semaphore * @hw: pointer to the HW structure @@ -111,7 +112,7 @@ static s32 igb_get_hw_semaphore_i210(struct e1000_hw *hw) * Return successful if access grant bit set, else clear the request for * EEPROM access and return -E1000_ERR_NVM (-1). **/ -s32 igb_acquire_nvm_i210(struct e1000_hw *hw) +static s32 igb_acquire_nvm_i210(struct e1000_hw *hw) { return igb_acquire_swfw_sync_i210(hw, E1000_SWFW_EEP_SM); } @@ -123,7 +124,7 @@ s32 igb_acquire_nvm_i210(struct e1000_hw *hw) * Stop any current commands to the EEPROM and clear the EEPROM request bit, * then release the semaphores acquired. **/ -void igb_release_nvm_i210(struct e1000_hw *hw) +static void igb_release_nvm_i210(struct e1000_hw *hw) { igb_release_swfw_sync_i210(hw, E1000_SWFW_EEP_SM); } @@ -206,8 +207,8 @@ void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask) * Reads a 16 bit word from the Shadow Ram using the EERD register. * Uses necessary synchronization semaphores. 
**/ -s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words, - u16 *data) +static s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) { s32 status = E1000_SUCCESS; u16 i, count; @@ -306,8 +307,8 @@ out: * If error code is returned, data and Shadow RAM may be inconsistent - buffer * partially written. **/ -s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words, - u16 *data) +static s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) { s32 status = E1000_SUCCESS; u16 i, count; @@ -555,7 +556,7 @@ s32 igb_read_invm_version(struct e1000_hw *hw, * Calculates the EEPROM checksum by reading/adding each word of the EEPROM * and then verifies that the sum of the EEPROM is equal to 0xBABA. **/ -s32 igb_validate_nvm_checksum_i210(struct e1000_hw *hw) +static s32 igb_validate_nvm_checksum_i210(struct e1000_hw *hw) { s32 status = E1000_SUCCESS; s32 (*read_op_ptr)(struct e1000_hw *, u16, u16, u16 *); @@ -590,7 +591,7 @@ s32 igb_validate_nvm_checksum_i210(struct e1000_hw *hw) * up to the checksum. Then calculates the EEPROM checksum and writes the * value to the EEPROM. Next commit EEPROM data onto the Flash. **/ -s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw) +static s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw) { s32 ret_val = E1000_SUCCESS; u16 checksum = 0; @@ -684,7 +685,7 @@ bool igb_get_flash_presence_i210(struct e1000_hw *hw) * @hw: pointer to the HW structure * **/ -s32 igb_update_flash_i210(struct e1000_hw *hw) +static s32 igb_update_flash_i210(struct e1000_hw *hw) { s32 ret_val = E1000_SUCCESS; u32 flup; diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.h b/drivers/net/ethernet/intel/igb/e1000_i210.h index 2d913716573a..907fe99a9813 100644 --- a/drivers/net/ethernet/intel/igb/e1000_i210.h +++ b/drivers/net/ethernet/intel/igb/e1000_i210.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel(R) Gigabit Ethernet Linux driver - Copyright(c) 2007-2013 Intel Corporation. + Copyright(c) 2007-2014 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -13,8 +13,7 @@ more details. You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + this program; if not, see <http://www.gnu.org/licenses/>. The full GNU General Public License is included in this distribution in the file called "COPYING". 
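The header hunk below drops the prototypes for the NVM helpers that e1000_i210.c just marked static (note the forward declaration of igb_update_flash_i210() added near the top of that file). A minimal sketch of the pattern, with hypothetical names (hw_ctx, nvm_commit, update_flash are illustrative, not driver symbols): a function used by only one compilation unit gains static, picks up a forward declaration when a caller precedes its definition, and loses its header prototype.

struct hw_ctx;					/* opaque handle, illustrative only */

static int update_flash(struct hw_ctx *hw);	/* forward declaration */

static int nvm_commit(struct hw_ctx *hw)
{
	/* This caller sits above the definition, which is why the
	 * forward declaration at the top of the file is needed.
	 */
	return update_flash(hw);
}

static int update_flash(struct hw_ctx *hw)
{
	(void)hw;				/* body elided in this sketch */
	return 0;
}

The payoff is a smaller public surface: the symbols are no longer exported, and the compiler can warn if one of them ever becomes dead code.
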
@@ -28,17 +27,8 @@ #ifndef _E1000_I210_H_ #define _E1000_I210_H_ -s32 igb_update_flash_i210(struct e1000_hw *hw); -s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw); -s32 igb_validate_nvm_checksum_i210(struct e1000_hw *hw); -s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words, - u16 *data); -s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words, - u16 *data); s32 igb_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask); void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask); -s32 igb_acquire_nvm_i210(struct e1000_hw *hw); -void igb_release_nvm_i210(struct e1000_hw *hw); s32 igb_valid_led_default_i210(struct e1000_hw *hw, u16 *data); s32 igb_read_invm_version(struct e1000_hw *hw, struct e1000_fw_version *invm_ver); diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c index 298f0ed50670..5910a932ea7c 100644 --- a/drivers/net/ethernet/intel/igb/e1000_mac.c +++ b/drivers/net/ethernet/intel/igb/e1000_mac.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel(R) Gigabit Ethernet Linux driver - Copyright(c) 2007-2013 Intel Corporation. + Copyright(c) 2007-2014 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -13,8 +13,7 @@ more details. You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + this program; if not, see <http://www.gnu.org/licenses/>. The full GNU General Public License is included in this distribution in the file called "COPYING". diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.h b/drivers/net/ethernet/intel/igb/e1000_mac.h index e4cbe8ef67b3..99299ba8ee3a 100644 --- a/drivers/net/ethernet/intel/igb/e1000_mac.h +++ b/drivers/net/ethernet/intel/igb/e1000_mac.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel(R) Gigabit Ethernet Linux driver - Copyright(c) 2007-2013 Intel Corporation. + Copyright(c) 2007-2014 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -13,8 +13,7 @@ more details. You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + this program; if not, see <http://www.gnu.org/licenses/>. The full GNU General Public License is included in this distribution in the file called "COPYING". diff --git a/drivers/net/ethernet/intel/igb/e1000_mbx.c b/drivers/net/ethernet/intel/igb/e1000_mbx.c index dac1447fabf7..d5b121771c31 100644 --- a/drivers/net/ethernet/intel/igb/e1000_mbx.c +++ b/drivers/net/ethernet/intel/igb/e1000_mbx.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel(R) Gigabit Ethernet Linux driver - Copyright(c) 2007-2013 Intel Corporation. + Copyright(c) 2007-2014 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -13,8 +13,7 @@ more details. 
You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + this program; if not, see <http://www.gnu.org/licenses/>. The full GNU General Public License is included in this distribution in the file called "COPYING". diff --git a/drivers/net/ethernet/intel/igb/e1000_mbx.h b/drivers/net/ethernet/intel/igb/e1000_mbx.h index de9bba41acf3..f52f5515e5a8 100644 --- a/drivers/net/ethernet/intel/igb/e1000_mbx.h +++ b/drivers/net/ethernet/intel/igb/e1000_mbx.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel(R) Gigabit Ethernet Linux driver - Copyright(c) 2007-2013 Intel Corporation. + Copyright(c) 2007-2014 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -13,8 +13,7 @@ more details. You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + this program; if not, see <http://www.gnu.org/licenses/>. The full GNU General Public License is included in this distribution in the file called "COPYING". diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.c b/drivers/net/ethernet/intel/igb/e1000_nvm.c index a7db7f3db914..9abf82919c65 100644 --- a/drivers/net/ethernet/intel/igb/e1000_nvm.c +++ b/drivers/net/ethernet/intel/igb/e1000_nvm.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel(R) Gigabit Ethernet Linux driver - Copyright(c) 2007-2013 Intel Corporation. + Copyright(c) 2007-2014 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -13,8 +13,7 @@ more details. You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + this program; if not, see <http://www.gnu.org/licenses/>. The full GNU General Public License is included in this distribution in the file called "COPYING". diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.h b/drivers/net/ethernet/intel/igb/e1000_nvm.h index 433b7419cb98..5b101170b17e 100644 --- a/drivers/net/ethernet/intel/igb/e1000_nvm.h +++ b/drivers/net/ethernet/intel/igb/e1000_nvm.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel(R) Gigabit Ethernet Linux driver - Copyright(c) 2013 Intel Corporation. + Copyright(c) 2007-2014 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -13,8 +13,7 @@ more details. You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + this program; if not, see <http://www.gnu.org/licenses/>. The full GNU General Public License is included in this distribution in the file called "COPYING". 
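The e1000_phy.c diff that follows deletes the unused e1000_write_sfp_data_byte(); its read-side sibling igb_read_sfp_data_byte() stays. Both are built on the same I2CCMD idiom: program a command word, poll a ready bit under a bounded delay loop, then check an error bit. A compilable userspace sketch of that polling shape, with stand-in names (CMD_READY, CMD_ERROR, rd(), delay_us() are illustrative, not driver symbols):

#include <errno.h>

#define CMD_READY	(1u << 30)	/* controller finished the command */
#define CMD_ERROR	(1u << 31)	/* controller reported a failure */
#define POLL_MAX	200u		/* iterations before giving up */

/* Stand-ins for MMIO and busy-wait; a real driver uses readl()/udelay(). */
static unsigned int cmd_reg;
static unsigned int rd(void) { return cmd_reg; }
static void delay_us(unsigned int us) { (void)us; }

static int cmd_wait(void)
{
	unsigned int i, cmd = 0;

	for (i = 0; i < POLL_MAX; i++) {
		delay_us(50);
		cmd = rd();
		if (cmd & CMD_READY)
			break;
	}
	if (!(cmd & CMD_READY))
		return -ETIMEDOUT;	/* poll budget exhausted */
	if (cmd & CMD_ERROR)
		return -EIO;		/* device flagged the transfer */
	return 0;
}

Per the removed function's own comment, the write variant also had to read the full 16-bit word first and merge the new byte into the right lane before issuing the write, because the programming interface is 16 bits wide.
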
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c index ad2b74d95138..4009bbab7407 100644 --- a/drivers/net/ethernet/intel/igb/e1000_phy.c +++ b/drivers/net/ethernet/intel/igb/e1000_phy.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel(R) Gigabit Ethernet Linux driver - Copyright(c) 2007-2013 Intel Corporation. + Copyright(c) 2007-2014 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -13,8 +13,7 @@ more details. You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + this program; if not, see <http://www.gnu.org/licenses/>. The full GNU General Public License is included in this distribution in the file called "COPYING". @@ -394,77 +393,6 @@ s32 igb_read_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 *data) } /** - * e1000_write_sfp_data_byte - Writes SFP module data. - * @hw: pointer to the HW structure - * @offset: byte location offset to write to - * @data: data to write - * - * Writes one byte to SFP module data stored - * in SFP resided EEPROM memory or SFP diagnostic area. - * Function should be called with - * E1000_I2CCMD_SFP_DATA_ADDR(<byte offset>) for SFP module database access - * E1000_I2CCMD_SFP_DIAG_ADDR(<byte offset>) for SFP diagnostics parameters - * access - **/ -s32 e1000_write_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 data) -{ - u32 i = 0; - u32 i2ccmd = 0; - u32 data_local = 0; - - if (offset > E1000_I2CCMD_SFP_DIAG_ADDR(255)) { - hw_dbg("I2CCMD command address exceeds upper limit\n"); - return -E1000_ERR_PHY; - } - /* The programming interface is 16 bits wide - * so we need to read the whole word first - * then update appropriate byte lane and write - * the updated word back. - */ - /* Set up Op-code, EEPROM Address,in the I2CCMD - * register. The MAC will take care of interfacing - * with an EEPROM to write the data given. 
- */ - i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) | - E1000_I2CCMD_OPCODE_READ); - /* Set a command to read single word */ - wr32(E1000_I2CCMD, i2ccmd); - for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) { - udelay(50); - /* Poll the ready bit to see if lastly - * launched I2C operation completed - */ - i2ccmd = rd32(E1000_I2CCMD); - if (i2ccmd & E1000_I2CCMD_READY) { - /* Check if this is READ or WRITE phase */ - if ((i2ccmd & E1000_I2CCMD_OPCODE_READ) == - E1000_I2CCMD_OPCODE_READ) { - /* Write the selected byte - * lane and update whole word - */ - data_local = i2ccmd & 0xFF00; - data_local |= data; - i2ccmd = ((offset << - E1000_I2CCMD_REG_ADDR_SHIFT) | - E1000_I2CCMD_OPCODE_WRITE | data_local); - wr32(E1000_I2CCMD, i2ccmd); - } else { - break; - } - } - } - if (!(i2ccmd & E1000_I2CCMD_READY)) { - hw_dbg("I2CCMD Write did not complete\n"); - return -E1000_ERR_PHY; - } - if (i2ccmd & E1000_I2CCMD_ERROR) { - hw_dbg("I2CCMD Error bit set\n"); - return -E1000_ERR_PHY; - } - return 0; -} - -/** * igb_read_phy_reg_igp - Read igp PHY register * @hw: pointer to the HW structure * @offset: register offset to be read diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.h b/drivers/net/ethernet/intel/igb/e1000_phy.h index 6a0873f2095a..4c2c36c46a73 100644 --- a/drivers/net/ethernet/intel/igb/e1000_phy.h +++ b/drivers/net/ethernet/intel/igb/e1000_phy.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel(R) Gigabit Ethernet Linux driver - Copyright(c) 2007-2013 Intel Corporation. + Copyright(c) 2007-2014 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -13,8 +13,7 @@ more details. You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + this program; if not, see <http://www.gnu.org/licenses/>. The full GNU General Public License is included in this distribution in the file called "COPYING". @@ -70,7 +69,6 @@ s32 igb_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data); s32 igb_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data); s32 igb_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data); s32 igb_read_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 *data); -s32 e1000_write_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 data); s32 igb_copper_link_setup_82580(struct e1000_hw *hw); s32 igb_get_phy_info_82580(struct e1000_hw *hw); s32 igb_phy_force_speed_duplex_82580(struct e1000_hw *hw); diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h index 82632c6c53af..bdb246e848e1 100644 --- a/drivers/net/ethernet/intel/igb/e1000_regs.h +++ b/drivers/net/ethernet/intel/igb/e1000_regs.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel(R) Gigabit Ethernet Linux driver - Copyright(c) 2007-2013 Intel Corporation. + Copyright(c) 2007-2014 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -13,8 +13,7 @@ more details. You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 
+ this program; if not, see <http://www.gnu.org/licenses/>. The full GNU General Public License is included in this distribution in the file called "COPYING". @@ -41,6 +40,7 @@ #define E1000_FCT 0x00030 /* Flow Control Type - RW */ #define E1000_CONNSW 0x00034 /* Copper/Fiber switch control - RW */ #define E1000_VET 0x00038 /* VLAN Ether Type - RW */ +#define E1000_TSSDP 0x0003C /* Time Sync SDP Configuration Register - RW */ #define E1000_ICR 0x000C0 /* Interrupt Cause Read - R/clr */ #define E1000_ITR 0x000C4 /* Interrupt Throttling Rate - RW */ #define E1000_ICS 0x000C8 /* Interrupt Cause Set - WO */ @@ -102,6 +102,14 @@ #define E1000_SYSTIMH 0x0B604 /* System time register High - RO */ #define E1000_TIMINCA 0x0B608 /* Increment attributes register - RW */ #define E1000_TSAUXC 0x0B640 /* Timesync Auxiliary Control register */ +#define E1000_TRGTTIML0 0x0B644 /* Target Time Register 0 Low - RW */ +#define E1000_TRGTTIMH0 0x0B648 /* Target Time Register 0 High - RW */ +#define E1000_TRGTTIML1 0x0B64C /* Target Time Register 1 Low - RW */ +#define E1000_TRGTTIMH1 0x0B650 /* Target Time Register 1 High - RW */ +#define E1000_AUXSTMPL0 0x0B65C /* Auxiliary Time Stamp 0 Register Low - RO */ +#define E1000_AUXSTMPH0 0x0B660 /* Auxiliary Time Stamp 0 Register High - RO */ +#define E1000_AUXSTMPL1 0x0B664 /* Auxiliary Time Stamp 1 Register Low - RO */ +#define E1000_AUXSTMPH1 0x0B668 /* Auxiliary Time Stamp 1 Register High - RO */ #define E1000_SYSTIMR 0x0B6F8 /* System time register Residue */ #define E1000_TSICR 0x0B66C /* Interrupt Cause Register */ #define E1000_TSIM 0x0B674 /* Interrupt Mask Register */ @@ -349,16 +357,30 @@ #define E1000_P2VMAILBOX(_n) (0x00C00 + (4 * (_n))) #define E1000_VMBMEM(_n) (0x00800 + (64 * (_n))) #define E1000_VMOLR(_n) (0x05AD0 + (4 * (_n))) +#define E1000_DVMOLR(_n) (0x0C038 + (64 * (_n))) #define E1000_VLVF(_n) (0x05D00 + (4 * (_n))) /* VLAN Virtual Machine * Filter - RW */ #define E1000_VMVIR(_n) (0x03700 + (4 * (_n))) -#define wr32(reg, value) (writel(value, hw->hw_addr + reg)) -#define rd32(reg) (readl(hw->hw_addr + reg)) +struct e1000_hw; + +u32 igb_rd32(struct e1000_hw *hw, u32 reg); + +/* write operations, indexed using DWORDS */ +#define wr32(reg, val) \ +do { \ + u8 __iomem *hw_addr = ACCESS_ONCE((hw)->hw_addr); \ + if (!E1000_REMOVED(hw_addr)) \ + writel((val), &hw_addr[(reg)]); \ +} while (0) + +#define rd32(reg) (igb_rd32(hw, reg)) + #define wrfl() ((void)rd32(E1000_STATUS)) #define array_wr32(reg, offset, value) \ - (writel(value, hw->hw_addr + reg + ((offset) << 2))) + wr32((reg) + ((offset) << 2), (value)) + #define array_rd32(reg, offset) \ (readl(hw->hw_addr + reg + ((offset) << 2))) @@ -397,4 +419,6 @@ #define E1000_INVM_DATA_REG(_n) (0x12120 + 4*(_n)) #define E1000_INVM_SIZE 64 /* Number of INVM Data Registers */ +#define E1000_REMOVED(h) unlikely(!(h)) + #endif diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h index ccf472f073dd..7fbe1e925143 100644 --- a/drivers/net/ethernet/intel/igb/igb.h +++ b/drivers/net/ethernet/intel/igb/igb.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel(R) Gigabit Ethernet Linux driver - Copyright(c) 2007-2013 Intel Corporation. + Copyright(c) 2007-2014 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -13,8 +13,7 @@ more details. 
You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + this program; if not, see <http://www.gnu.org/licenses/>. The full GNU General Public License is included in this distribution in the file called "COPYING". @@ -42,6 +41,7 @@ #include <linux/i2c.h> #include <linux/i2c-algo-bit.h> #include <linux/pci.h> +#include <linux/mdio.h> struct igb_adapter; @@ -434,6 +434,7 @@ struct igb_adapter { struct delayed_work ptp_overflow_work; struct work_struct ptp_tx_work; struct sk_buff *ptp_tx_skb; + struct hwtstamp_config tstamp_config; unsigned long ptp_tx_start; unsigned long last_rx_ptp_check; spinlock_t tmreg_lock; @@ -456,6 +457,7 @@ struct igb_adapter { unsigned long link_check_timeout; int copper_tries; struct e1000_info ei; + u16 eee_advert; }; #define IGB_FLAG_HAS_MSI (1 << 0) @@ -472,6 +474,7 @@ struct igb_adapter { #define IGB_FLAG_MAS_CAPABLE (1 << 11) #define IGB_FLAG_MAS_ENABLE (1 << 12) #define IGB_FLAG_HAS_MSIX (1 << 13) +#define IGB_FLAG_EEE (1 << 14) /* Media Auto Sense */ #define IGB_MAS_ENABLE_0 0X0001 @@ -489,7 +492,8 @@ struct igb_adapter { enum e1000_state_t { __IGB_TESTING, __IGB_RESETTING, - __IGB_DOWN + __IGB_DOWN, + __IGB_PTP_TX_IN_PROGRESS, }; enum igb_boards { @@ -525,9 +529,7 @@ void igb_set_fw_version(struct igb_adapter *); void igb_ptp_init(struct igb_adapter *adapter); void igb_ptp_stop(struct igb_adapter *adapter); void igb_ptp_reset(struct igb_adapter *adapter); -void igb_ptp_tx_work(struct work_struct *work); void igb_ptp_rx_hang(struct igb_adapter *adapter); -void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter); void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb); void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, unsigned char *va, struct sk_buff *skb); @@ -545,8 +547,8 @@ static inline void igb_ptp_rx_hwtstamp(struct igb_ring *rx_ring, rx_ring->last_rx_timestamp = jiffies; } -int igb_ptp_hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, - int cmd); +int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr); +int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr); #ifdef CONFIG_IGB_HWMON void igb_sysfs_exit(struct igb_adapter *adapter); int igb_sysfs_init(struct igb_adapter *adapter); diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c index 1df02378de69..e5570acbeea8 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel(R) Gigabit Ethernet Linux driver - Copyright(c) 2007-2013 Intel Corporation. + Copyright(c) 2007-2014 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -13,8 +13,7 @@ more details. You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + this program; if not, see <http://www.gnu.org/licenses/>. The full GNU General Public License is included in this distribution in the file called "COPYING". 
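The first igb_ethtool.c hunk below switches the statistics readers from the u64_stats_fetch_begin_bh()/u64_stats_fetch_retry_bh() pair to the _irq variants. The reader shape is the same either way: snapshot the counters under a sequence count and retry if a writer raced. A self-contained model of that loop (struct stats, fetch_begin(), fetch_retry() are illustrative; the kernel helpers additionally insert memory barriers and handle 32-bit kernels, which this sketch omits):

#include <stdint.h>

struct stats {
	unsigned int seq;	/* even: stable; odd: writer in progress */
	uint64_t packets;
	uint64_t bytes;
};

static unsigned int fetch_begin(const struct stats *s)
{
	unsigned int seq;

	do {
		seq = s->seq;	/* wait out an in-flight writer */
	} while (seq & 1);
	return seq;
}

static int fetch_retry(const struct stats *s, unsigned int seq)
{
	return s->seq != seq;	/* counter moved: snapshot may be torn */
}

static void snapshot(const struct stats *s, uint64_t *pkts, uint64_t *bytes)
{
	unsigned int seq;

	do {
		seq = fetch_begin(s);
		*pkts = s->packets;
		*bytes = s->bytes;
	} while (fetch_retry(s, seq));
}

The _bh to _irq rename only changes which writer contexts are excluded; the retry loop itself, as the hunk shows, is unchanged.
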
@@ -2274,15 +2273,15 @@ static void igb_get_ethtool_stats(struct net_device *netdev, ring = adapter->tx_ring[j]; do { - start = u64_stats_fetch_begin_bh(&ring->tx_syncp); + start = u64_stats_fetch_begin_irq(&ring->tx_syncp); data[i] = ring->tx_stats.packets; data[i+1] = ring->tx_stats.bytes; data[i+2] = ring->tx_stats.restart_queue; - } while (u64_stats_fetch_retry_bh(&ring->tx_syncp, start)); + } while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start)); do { - start = u64_stats_fetch_begin_bh(&ring->tx_syncp2); + start = u64_stats_fetch_begin_irq(&ring->tx_syncp2); restart2 = ring->tx_stats.restart_queue2; - } while (u64_stats_fetch_retry_bh(&ring->tx_syncp2, start)); + } while (u64_stats_fetch_retry_irq(&ring->tx_syncp2, start)); data[i+2] += restart2; i += IGB_TX_QUEUE_STATS_LEN; @@ -2290,13 +2289,13 @@ static void igb_get_ethtool_stats(struct net_device *netdev, for (j = 0; j < adapter->num_rx_queues; j++) { ring = adapter->rx_ring[j]; do { - start = u64_stats_fetch_begin_bh(&ring->rx_syncp); + start = u64_stats_fetch_begin_irq(&ring->rx_syncp); data[i] = ring->rx_stats.packets; data[i+1] = ring->rx_stats.bytes; data[i+2] = ring->rx_stats.drops; data[i+3] = ring->rx_stats.csum_err; data[i+4] = ring->rx_stats.alloc_failed; - } while (u64_stats_fetch_retry_bh(&ring->rx_syncp, start)); + } while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start)); i += IGB_RX_QUEUE_STATS_LEN; } spin_unlock(&adapter->stats64_lock); @@ -2354,6 +2353,11 @@ static int igb_get_ts_info(struct net_device *dev, { struct igb_adapter *adapter = netdev_priv(dev); + if (adapter->ptp_clock) + info->phc_index = ptp_clock_index(adapter->ptp_clock); + else + info->phc_index = -1; + switch (adapter->hw.mac.type) { case e1000_82575: info->so_timestamping = @@ -2375,11 +2379,6 @@ static int igb_get_ts_info(struct net_device *dev, SOF_TIMESTAMPING_RX_HARDWARE | SOF_TIMESTAMPING_RAW_HARDWARE; - if (adapter->ptp_clock) - info->phc_index = ptp_clock_index(adapter->ptp_clock); - else - info->phc_index = -1; - info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON); @@ -2588,7 +2587,7 @@ static int igb_get_eee(struct net_device *netdev, struct ethtool_eee *edata) { struct igb_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; - u32 ipcnfg, eeer, ret_val; + u32 ret_val; u16 phy_data; if ((hw->mac.type < e1000_i350) || @@ -2597,16 +2596,25 @@ static int igb_get_eee(struct net_device *netdev, struct ethtool_eee *edata) edata->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_100baseT_Full); + if (!hw->dev_spec._82575.eee_disable) + edata->advertised = + mmd_eee_adv_to_ethtool_adv_t(adapter->eee_advert); + + /* The IPCNFG and EEER registers are not supported on I354. 
*/ + if (hw->mac.type == e1000_i354) { + igb_get_eee_status_i354(hw, (bool *)&edata->eee_active); + } else { + u32 eeer; - ipcnfg = rd32(E1000_IPCNFG); - eeer = rd32(E1000_EEER); + eeer = rd32(E1000_EEER); - /* EEE status on negotiated link */ - if (ipcnfg & E1000_IPCNFG_EEE_1G_AN) - edata->advertised = ADVERTISED_1000baseT_Full; + /* EEE status on negotiated link */ + if (eeer & E1000_EEER_EEE_NEG) + edata->eee_active = true; - if (ipcnfg & E1000_IPCNFG_EEE_100M_AN) - edata->advertised |= ADVERTISED_100baseT_Full; + if (eeer & E1000_EEER_TX_LPI_EN) + edata->tx_lpi_enabled = true; + } /* EEE Link Partner Advertised */ switch (hw->mac.type) { @@ -2617,8 +2625,8 @@ static int igb_get_eee(struct net_device *netdev, struct ethtool_eee *edata) return -ENODATA; edata->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(phy_data); - break; + case e1000_i354: case e1000_i210: case e1000_i211: ret_val = igb_read_xmdio_reg(hw, E1000_EEE_LP_ADV_ADDR_I210, @@ -2634,12 +2642,10 @@ static int igb_get_eee(struct net_device *netdev, struct ethtool_eee *edata) break; } - if (eeer & E1000_EEER_EEE_NEG) - edata->eee_active = true; - edata->eee_enabled = !hw->dev_spec._82575.eee_disable; - if (eeer & E1000_EEER_TX_LPI_EN) + if ((hw->mac.type == e1000_i354) && + (edata->eee_enabled)) edata->tx_lpi_enabled = true; /* Report correct negotiated EEE status for devices that @@ -2687,9 +2693,10 @@ static int igb_set_eee(struct net_device *netdev, return -EINVAL; } - if (eee_curr.advertised != edata->advertised) { + if (edata->advertised & + ~(ADVERTISE_100_FULL | ADVERTISE_1000_FULL)) { dev_err(&adapter->pdev->dev, - "Setting EEE Advertisement is not supported\n"); + "EEE Advertisement supports only 100Tx and or 100T full duplex\n"); return -EINVAL; } @@ -2699,9 +2706,14 @@ static int igb_set_eee(struct net_device *netdev, return -EINVAL; } + adapter->eee_advert = ethtool_adv_to_mmd_eee_adv_t(edata->advertised); if (hw->dev_spec._82575.eee_disable != !edata->eee_enabled) { hw->dev_spec._82575.eee_disable = !edata->eee_enabled; - igb_set_eee_i350(hw); + adapter->flags |= IGB_FLAG_EEE; + if (hw->mac.type == e1000_i350) + igb_set_eee_i350(hw); + else + igb_set_eee_i354(hw); /* reset link */ if (netif_running(netdev)) @@ -2779,9 +2791,11 @@ static int igb_get_module_eeprom(struct net_device *netdev, /* Read EEPROM block, SFF-8079/SFF-8472, word at a time */ for (i = 0; i < last_word - first_word + 1; i++) { status = igb_read_phy_reg_i2c(hw, first_word + i, &dataword[i]); - if (status != E1000_SUCCESS) + if (status != E1000_SUCCESS) { /* Error occurred while reading module */ + kfree(dataword); return -EIO; + } be16_to_cpus(&dataword[i]); } diff --git a/drivers/net/ethernet/intel/igb/igb_hwmon.c b/drivers/net/ethernet/intel/igb/igb_hwmon.c index e0af5bc61613..8333f67acf96 100644 --- a/drivers/net/ethernet/intel/igb/igb_hwmon.c +++ b/drivers/net/ethernet/intel/igb/igb_hwmon.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel(R) Gigabit Ethernet Linux driver - Copyright(c) 2007-2013 Intel Corporation. + Copyright(c) 2007-2014 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -13,8 +13,7 @@ more details. You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + this program; if not, see <http://www.gnu.org/licenses/>. 
The full GNU General Public License is included in this distribution in the file called "COPYING". diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 46d31a49f5ea..30198185d19a 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel(R) Gigabit Ethernet Linux driver - Copyright(c) 2007-2013 Intel Corporation. + Copyright(c) 2007-2014 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -13,8 +13,7 @@ more details. You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + this program; if not, see <http://www.gnu.org/licenses/>. The full GNU General Public License is included in this distribution in the file called "COPYING". @@ -70,7 +69,7 @@ char igb_driver_version[] = DRV_VERSION; static const char igb_driver_string[] = "Intel(R) Gigabit Ethernet Network Driver"; static const char igb_copyright[] = - "Copyright (c) 2007-2013 Intel Corporation."; + "Copyright (c) 2007-2014 Intel Corporation."; static const struct e1000_info *igb_info_tbl[] = { [board_82575] = &e1000_82575_info, @@ -752,6 +751,28 @@ static void igb_cache_ring_register(struct igb_adapter *adapter) } } +u32 igb_rd32(struct e1000_hw *hw, u32 reg) +{ + struct igb_adapter *igb = container_of(hw, struct igb_adapter, hw); + u8 __iomem *hw_addr = ACCESS_ONCE(hw->hw_addr); + u32 value = 0; + + if (E1000_REMOVED(hw_addr)) + return ~value; + + value = readl(&hw_addr[reg]); + + /* reads should not return all F's */ + if (!(~value) && (!reg || !(~readl(hw_addr)))) { + struct net_device *netdev = igb->netdev; + hw->hw_addr = NULL; + netif_device_detach(netdev); + netdev_err(netdev, "PCIe link lost, device now detached\n"); + } + + return value; +} + /** * igb_write_ivar - configure ivar for given MSI-X vector * @hw: pointer to the HW structure @@ -1014,6 +1035,12 @@ static void igb_reset_q_vector(struct igb_adapter *adapter, int v_idx) { struct igb_q_vector *q_vector = adapter->q_vector[v_idx]; + /* Coming from igb_set_interrupt_capability, the vectors are not yet + * allocated. So, q_vector is NULL so we should stop here. 
+ */ + if (!q_vector) + return; + if (q_vector->tx.ring) adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL; @@ -1111,16 +1138,18 @@ static void igb_set_interrupt_capability(struct igb_adapter *adapter, bool msix) for (i = 0; i < numvecs; i++) adapter->msix_entries[i].entry = i; - err = pci_enable_msix(adapter->pdev, - adapter->msix_entries, - numvecs); - if (err == 0) + err = pci_enable_msix_range(adapter->pdev, + adapter->msix_entries, + numvecs, + numvecs); + if (err > 0) return; igb_reset_interrupt_capability(adapter); /* If we can't do MSI-X, try MSI */ msi_only: + adapter->flags &= ~IGB_FLAG_HAS_MSIX; #ifdef CONFIG_PCI_IOV /* disable SR-IOV for non MSI-X configurations */ if (adapter->vf_data) { @@ -1726,6 +1755,10 @@ int igb_up(struct igb_adapter *adapter) hw->mac.get_link_status = 1; schedule_work(&adapter->watchdog_task); + if ((adapter->flags & IGB_FLAG_EEE) && + (!hw->dev_spec._82575.eee_disable)) + adapter->eee_advert = MDIO_EEE_100TX | MDIO_EEE_1000T; + return 0; } @@ -1974,6 +2007,21 @@ void igb_reset(struct igb_adapter *adapter) } } #endif + /* Re-establish EEE setting */ + if (hw->phy.media_type == e1000_media_type_copper) { + switch (mac->type) { + case e1000_i350: + case e1000_i210: + case e1000_i211: + igb_set_eee_i350(hw); + break; + case e1000_i354: + igb_set_eee_i354(hw); + break; + default: + break; + } + } if (!netif_running(adapter->netdev)) igb_power_down_link(adapter); @@ -2560,23 +2608,36 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) (adapter->flags & IGB_FLAG_HAS_MSIX) ? "MSI-X" : (adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy", adapter->num_rx_queues, adapter->num_tx_queues); - switch (hw->mac.type) { - case e1000_i350: - case e1000_i210: - case e1000_i211: - igb_set_eee_i350(hw); - break; - case e1000_i354: - if (hw->phy.media_type == e1000_media_type_copper) { + if (hw->phy.media_type == e1000_media_type_copper) { + switch (hw->mac.type) { + case e1000_i350: + case e1000_i210: + case e1000_i211: + /* Enable EEE for internal copper PHY devices */ + err = igb_set_eee_i350(hw); + if ((!err) && + (!hw->dev_spec._82575.eee_disable)) { + adapter->eee_advert = + MDIO_EEE_100TX | MDIO_EEE_1000T; + adapter->flags |= IGB_FLAG_EEE; + } + break; + case e1000_i354: if ((rd32(E1000_CTRL_EXT) & - E1000_CTRL_EXT_LINK_MODE_SGMII)) - igb_set_eee_i354(hw); + E1000_CTRL_EXT_LINK_MODE_SGMII)) { + err = igb_set_eee_i354(hw); + if ((!err) && + (!hw->dev_spec._82575.eee_disable)) { + adapter->eee_advert = + MDIO_EEE_100TX | MDIO_EEE_1000T; + adapter->flags |= IGB_FLAG_EEE; + } + } + break; + default: + break; } - break; - default: - break; } - pm_runtime_put_noidle(&pdev->dev); return 0; @@ -2591,7 +2652,7 @@ err_eeprom: iounmap(hw->flash_address); err_sw_init: igb_clear_interrupt_scheme(adapter); - iounmap(hw->hw_addr); + pci_iounmap(pdev, hw->hw_addr); err_ioremap: free_netdev(netdev); err_alloc_etherdev: @@ -2758,7 +2819,7 @@ static void igb_remove(struct pci_dev *pdev) igb_disable_sriov(pdev); #endif - iounmap(hw->hw_addr); + pci_iounmap(pdev, hw->hw_addr); if (hw->flash_address) iounmap(hw->flash_address); pci_release_selected_regions(pdev, @@ -3510,6 +3571,13 @@ static inline void igb_set_vmolr(struct igb_adapter *adapter, vmolr = rd32(E1000_VMOLR(vfn)); vmolr |= E1000_VMOLR_STRVLAN; /* Strip vlan tags */ + if (hw->mac.type == e1000_i350) { + u32 dvmolr; + + dvmolr = rd32(E1000_DVMOLR(vfn)); + dvmolr |= E1000_DVMOLR_STRVLAN; + wr32(E1000_DVMOLR(vfn), dvmolr); + } if (aupe) vmolr |= E1000_VMOLR_AUPE; /* Accept untagged packets */ 
else @@ -4158,6 +4226,15 @@ static void igb_watchdog_task(struct work_struct *work) (ctrl & E1000_CTRL_RFCE) ? "RX" : (ctrl & E1000_CTRL_TFCE) ? "TX" : "None"); + /* disable EEE if enabled */ + if ((adapter->flags & IGB_FLAG_EEE) && + (adapter->link_duplex == HALF_DUPLEX)) { + dev_info(&adapter->pdev->dev, + "EEE Disabled: unsupported at half duplex. Re-enable using ethtool when at full duplex.\n"); + adapter->hw.dev_spec._82575.eee_disable = true; + adapter->flags &= ~IGB_FLAG_EEE; + } + /* check if SmartSpeed worked */ igb_check_downshift(hw); if (phy->speed_downgraded) @@ -4306,8 +4383,7 @@ enum latency_range { * were determined based on theoretical maximum wire speed and testing * data, in order to minimize response time while increasing bulk * throughput. - * This functionality is controlled by the InterruptThrottleRate module - * parameter (see igb_param.c) + * This functionality is controlled by ethtool's coalescing settings. * NOTE: This function is called only when operating in a multiqueue * receive environment. **/ @@ -4381,8 +4457,7 @@ clear_counts: * based on theoretical maximum wire speed and thresholds were set based * on testing data as well as attempting to minimize response time * while increasing bulk throughput. - * this functionality is controlled by the InterruptThrottleRate module - * parameter (see igb_param.c) + * This functionality is controlled by ethtool's coalescing settings. * NOTE: These calculations are only valid when operating in a single- * queue environment. **/ @@ -4546,7 +4621,7 @@ static int igb_tso(struct igb_ring *tx_ring, /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP; - if (first->protocol == __constant_htons(ETH_P_IP)) { + if (first->protocol == htons(ETH_P_IP)) { struct iphdr *iph = ip_hdr(skb); iph->tot_len = 0; iph->check = 0; @@ -4602,12 +4677,12 @@ static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first) } else { u8 l4_hdr = 0; switch (first->protocol) { - case __constant_htons(ETH_P_IP): + case htons(ETH_P_IP): vlan_macip_lens |= skb_network_header_len(skb); type_tucmd |= E1000_ADVTXD_TUCMD_IPV4; l4_hdr = ip_hdr(skb)->protocol; break; - case __constant_htons(ETH_P_IPV6): + case htons(ETH_P_IPV6): vlan_macip_lens |= skb_network_header_len(skb); l4_hdr = ipv6_hdr(skb)->nexthdr; break; @@ -4905,12 +4980,11 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb, first->bytecount = skb->len; first->gso_segs = 1; - skb_tx_timestamp(skb); - if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { struct igb_adapter *adapter = netdev_priv(tx_ring->netdev); - if (!(adapter->ptp_tx_skb)) { + if (!test_and_set_bit_lock(__IGB_PTP_TX_IN_PROGRESS, + &adapter->state)) { skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; tx_flags |= IGB_TX_FLAGS_TSTAMP; @@ -4921,6 +4995,8 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb, } } + skb_tx_timestamp(skb); + if (vlan_tx_tag_present(skb)) { tx_flags |= IGB_TX_FLAGS_VLAN; tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT); @@ -5127,10 +5203,10 @@ void igb_update_stats(struct igb_adapter *adapter, } do { - start = u64_stats_fetch_begin_bh(&ring->rx_syncp); + start = u64_stats_fetch_begin_irq(&ring->rx_syncp); _bytes = ring->rx_stats.bytes; _packets = ring->rx_stats.packets; - } while (u64_stats_fetch_retry_bh(&ring->rx_syncp, start)); + } while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start)); bytes += _bytes; packets += _packets; } @@ -5143,10 +5219,10 @@ void igb_update_stats(struct igb_adapter *adapter, for (i = 0; i < 
adapter->num_tx_queues; i++) { struct igb_ring *ring = adapter->tx_ring[i]; do { - start = u64_stats_fetch_begin_bh(&ring->tx_syncp); + start = u64_stats_fetch_begin_irq(&ring->tx_syncp); _bytes = ring->tx_stats.bytes; _packets = ring->tx_stats.packets; - } while (u64_stats_fetch_retry_bh(&ring->tx_syncp, start)); + } while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start)); bytes += _bytes; packets += _packets; } @@ -6620,7 +6696,9 @@ static inline void igb_rx_hash(struct igb_ring *ring, struct sk_buff *skb) { if (ring->netdev->features & NETIF_F_RXHASH) - skb->rxhash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss); + skb_set_hash(skb, + le32_to_cpu(rx_desc->wb.lower.hi_dword.rss), + PKT_HASH_TYPE_L3); } /** @@ -6690,7 +6768,7 @@ static unsigned int igb_get_headlen(unsigned char *data, hdr.network += ETH_HLEN; /* handle any vlan tag if present */ - if (protocol == __constant_htons(ETH_P_8021Q)) { + if (protocol == htons(ETH_P_8021Q)) { if ((hdr.network - data) > (max_len - VLAN_HLEN)) return max_len; @@ -6699,7 +6777,7 @@ static unsigned int igb_get_headlen(unsigned char *data, } /* handle L3 protocols */ - if (protocol == __constant_htons(ETH_P_IP)) { + if (protocol == htons(ETH_P_IP)) { if ((hdr.network - data) > (max_len - sizeof(struct iphdr))) return max_len; @@ -6713,7 +6791,7 @@ static unsigned int igb_get_headlen(unsigned char *data, /* record next protocol if header is present */ if (!(hdr.ipv4->frag_off & htons(IP_OFFSET))) nexthdr = hdr.ipv4->protocol; - } else if (protocol == __constant_htons(ETH_P_IPV6)) { + } else if (protocol == htons(ETH_P_IPV6)) { if ((hdr.network - data) > (max_len - sizeof(struct ipv6hdr))) return max_len; @@ -6903,7 +6981,7 @@ static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget) unsigned int total_bytes = 0, total_packets = 0; u16 cleaned_count = igb_desc_unused(rx_ring); - do { + while (likely(total_packets < budget)) { union e1000_adv_rx_desc *rx_desc; /* return some buffers to hardware, one at a time is too slow */ @@ -6955,7 +7033,7 @@ static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget) /* update budget accounting */ total_packets++; - } while (likely(total_packets < budget)); + } /* place incomplete frames back on ring for completion */ rx_ring->skb = skb; @@ -7114,8 +7192,10 @@ static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) case SIOCGMIIREG: case SIOCSMIIREG: return igb_mii_ioctl(netdev, ifr, cmd); + case SIOCGHWTSTAMP: + return igb_ptp_get_ts_config(netdev, ifr); case SIOCSHWTSTAMP: - return igb_ptp_hwtstamp_ioctl(netdev, ifr, cmd); + return igb_ptp_set_ts_config(netdev, ifr); default: return -EOPNOTSUPP; } diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c index 5a54e3dc535d..2cca8fd5e574 100644 --- a/drivers/net/ethernet/intel/igb/igb_ptp.c +++ b/drivers/net/ethernet/intel/igb/igb_ptp.c @@ -12,9 +12,8 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + * You should have received a copy of the GNU General Public License along with + * this program; if not, see <http://www.gnu.org/licenses/>. 
*/ #include <linux/module.h> #include <linux/device.h> @@ -75,6 +74,8 @@ #define INCVALUE_82576 (16 << IGB_82576_TSYNC_SHIFT) #define IGB_NBITS_82580 40 +static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter); + /* SYSTIM read access for the 82576 */ static cycle_t igb_ptp_read_82576(const struct cyclecounter *cc) { @@ -372,7 +373,7 @@ static int igb_ptp_enable(struct ptp_clock_info *ptp, * This work function polls the TSYNCTXCTL valid bit to determine when a * timestamp has been taken for the current stored skb. **/ -void igb_ptp_tx_work(struct work_struct *work) +static void igb_ptp_tx_work(struct work_struct *work) { struct igb_adapter *adapter = container_of(work, struct igb_adapter, ptp_tx_work); @@ -386,6 +387,7 @@ void igb_ptp_tx_work(struct work_struct *work) IGB_PTP_TX_TIMEOUT)) { dev_kfree_skb_any(adapter->ptp_tx_skb); adapter->ptp_tx_skb = NULL; + clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state); adapter->tx_hwtstamp_timeouts++; dev_warn(&adapter->pdev->dev, "clearing Tx timestamp hang"); return; @@ -466,7 +468,7 @@ void igb_ptp_rx_hang(struct igb_adapter *adapter) * available, then it must have been for this skb here because we only * allow only one such packet into the queue. **/ -void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter) +static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; struct skb_shared_hwtstamps shhwtstamps; @@ -479,6 +481,7 @@ void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter) skb_tstamp_tx(adapter->ptp_tx_skb, &shhwtstamps); dev_kfree_skb_any(adapter->ptp_tx_skb); adapter->ptp_tx_skb = NULL; + clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state); } /** @@ -540,10 +543,26 @@ void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, } /** - * igb_ptp_hwtstamp_ioctl - control hardware time stamping + * igb_ptp_get_ts_config - get hardware time stamping config + * @netdev: + * @ifreq: + * + * Get the hwtstamp_config settings to return to the user. Rather than attempt + * to deconstruct the settings from the registers, just return a shadow copy + * of the last known settings. + **/ +int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct hwtstamp_config *config = &adapter->tstamp_config; + + return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ? + -EFAULT : 0; +} +/** + * igb_ptp_set_ts_config - control hardware time stamping * @netdev: * @ifreq: - * @cmd: * * Outgoing time stamping can be enabled and disabled. Play nice and * disable it when requested, although it shouldn't case any overhead @@ -557,12 +576,11 @@ void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, * not supported, with the exception of "all V2 events regardless of * level 2 or 4". 
**/ -int igb_ptp_hwtstamp_ioctl(struct net_device *netdev, - struct ifreq *ifr, int cmd) +int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr) { struct igb_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; - struct hwtstamp_config config; + struct hwtstamp_config *config = &adapter->tstamp_config; u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED; u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED; u32 tsync_rx_cfg = 0; @@ -570,14 +588,14 @@ int igb_ptp_hwtstamp_ioctl(struct net_device *netdev, bool is_l2 = false; u32 regval; - if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) + if (copy_from_user(config, ifr->ifr_data, sizeof(*config))) return -EFAULT; /* reserved for future extensions */ - if (config.flags) + if (config->flags) return -EINVAL; - switch (config.tx_type) { + switch (config->tx_type) { case HWTSTAMP_TX_OFF: tsync_tx_ctl = 0; case HWTSTAMP_TX_ON: @@ -586,7 +604,7 @@ int igb_ptp_hwtstamp_ioctl(struct net_device *netdev, return -ERANGE; } - switch (config.rx_filter) { + switch (config->rx_filter) { case HWTSTAMP_FILTER_NONE: tsync_rx_ctl = 0; break; @@ -610,7 +628,7 @@ int igb_ptp_hwtstamp_ioctl(struct net_device *netdev, case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2; - config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; + config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; is_l2 = true; is_l4 = true; break; @@ -621,12 +639,12 @@ int igb_ptp_hwtstamp_ioctl(struct net_device *netdev, */ if (hw->mac.type != e1000_82576) { tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL; - config.rx_filter = HWTSTAMP_FILTER_ALL; + config->rx_filter = HWTSTAMP_FILTER_ALL; break; } /* fall through */ default: - config.rx_filter = HWTSTAMP_FILTER_NONE; + config->rx_filter = HWTSTAMP_FILTER_NONE; return -ERANGE; } @@ -643,7 +661,7 @@ int igb_ptp_hwtstamp_ioctl(struct net_device *netdev, if ((hw->mac.type >= e1000_82580) && tsync_rx_ctl) { tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED; tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL; - config.rx_filter = HWTSTAMP_FILTER_ALL; + config->rx_filter = HWTSTAMP_FILTER_ALL; is_l2 = true; is_l4 = true; @@ -707,7 +725,7 @@ int igb_ptp_hwtstamp_ioctl(struct net_device *netdev, regval = rd32(E1000_RXSTMPL); regval = rd32(E1000_RXSTMPH); - return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? + return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ? -EFAULT : 0; } @@ -798,7 +816,7 @@ void igb_ptp_init(struct igb_adapter *adapter) /* Initialize the time sync interrupts for devices that support it. */ if (hw->mac.type >= e1000_82580) { - wr32(E1000_TSIM, E1000_TSIM_TXTS); + wr32(E1000_TSIM, TSYNC_INTERRUPTS); wr32(E1000_IMS, E1000_IMS_TS); } @@ -841,6 +859,7 @@ void igb_ptp_stop(struct igb_adapter *adapter) if (adapter->ptp_tx_skb) { dev_kfree_skb_any(adapter->ptp_tx_skb); adapter->ptp_tx_skb = NULL; + clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state); } if (adapter->ptp_clock) { @@ -864,6 +883,9 @@ void igb_ptp_reset(struct igb_adapter *adapter) if (!(adapter->flags & IGB_FLAG_PTP)) return; + /* reset the tstamp_config */ + memset(&adapter->tstamp_config, 0, sizeof(adapter->tstamp_config)); + switch (adapter->hw.mac.type) { case e1000_82576: /* Dial the nominal frequency. */ @@ -876,7 +898,7 @@ void igb_ptp_reset(struct igb_adapter *adapter) case e1000_i211: /* Enable the timer functions and interrupts. 
*/ wr32(E1000_TSAUXC, 0x0); - wr32(E1000_TSIM, E1000_TSIM_TXTS); + wr32(E1000_TSIM, TSYNC_INTERRUPTS); wr32(E1000_IMS, E1000_IMS_TS); break; default: diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c index 675435fc2e53..b7ab03a2f28f 100644 --- a/drivers/net/ethernet/intel/igbvf/netdev.c +++ b/drivers/net/ethernet/intel/igbvf/netdev.c @@ -1043,11 +1043,11 @@ static void igbvf_set_interrupt_capability(struct igbvf_adapter *adapter) for (i = 0; i < 3; i++) adapter->msix_entries[i].entry = i; - err = pci_enable_msix(adapter->pdev, - adapter->msix_entries, 3); + err = pci_enable_msix_range(adapter->pdev, + adapter->msix_entries, 3, 3); } - if (err) { + if (err < 0) { /* MSI-X failed */ dev_err(&adapter->pdev->dev, "Failed to initialize MSI-X interrupts.\n"); @@ -2014,12 +2014,12 @@ static inline bool igbvf_tx_csum(struct igbvf_adapter *adapter, if (skb->ip_summed == CHECKSUM_PARTIAL) { switch (skb->protocol) { - case __constant_htons(ETH_P_IP): + case htons(ETH_P_IP): tu_cmd |= E1000_ADVTXD_TUCMD_IPV4; if (ip_hdr(skb)->protocol == IPPROTO_TCP) tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP; break; - case __constant_htons(ETH_P_IPV6): + case htons(ETH_P_IPV6): if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP; break; diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c index 57e390cbe6d0..f42c201f727f 100644 --- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c +++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c @@ -1521,12 +1521,12 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev) int tso; if (test_bit(__IXGB_DOWN, &adapter->flags)) { - dev_kfree_skb(skb); + dev_kfree_skb_any(skb); return NETDEV_TX_OK; } if (skb->len <= 0) { - dev_kfree_skb(skb); + dev_kfree_skb_any(skb); return NETDEV_TX_OK; } @@ -1543,7 +1543,7 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev) tso = ixgb_tso(adapter, skb); if (tso < 0) { - dev_kfree_skb(skb); + dev_kfree_skb_any(skb); return NETDEV_TX_OK; } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index 0186ea2969fe..55c53a1cbb62 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -20,6 +20,7 @@ the file called "COPYING". Contact Information: + Linux NICS <linux.nics@intel.com> e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 @@ -765,6 +766,7 @@ struct ixgbe_adapter { struct ptp_clock_info ptp_caps; struct work_struct ptp_tx_work; struct sk_buff *ptp_tx_skb; + struct hwtstamp_config tstamp_config; unsigned long ptp_tx_start; unsigned long last_overflow_check; unsigned long last_rx_ptp_check; @@ -806,10 +808,12 @@ enum ixgbe_state_t { __IXGBE_TESTING, __IXGBE_RESETTING, __IXGBE_DOWN, + __IXGBE_DISABLED, __IXGBE_REMOVING, __IXGBE_SERVICE_SCHED, __IXGBE_IN_SFP_INIT, __IXGBE_PTP_RUNNING, + __IXGBE_PTP_TX_IN_PROGRESS, }; struct ixgbe_cb { @@ -884,7 +888,6 @@ s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw, u16 soft_id); void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input, union ixgbe_atr_input *mask); -bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw); void ixgbe_set_rx_mode(struct net_device *netdev); #ifdef CONFIG_IXGBE_DCB void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter); @@ -958,8 +961,8 @@ static inline void ixgbe_ptp_rx_hwtstamp(struct ixgbe_ring *rx_ring, rx_ring->last_rx_timestamp = jiffies; } -int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter, struct ifreq *ifr, - int cmd); +int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr); +int ixgbe_ptp_get_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr); void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter); void ixgbe_ptp_reset(struct ixgbe_adapter *adapter); void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter, u32 eicr); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c index a26f3fee4f35..4c78ea8946c1 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2013 Intel Corporation. + Copyright(c) 1999 - 2014 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -20,6 +20,7 @@ the file called "COPYING". Contact Information: + Linux NICS <linux.nics@intel.com> e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 @@ -57,10 +58,12 @@ static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset, **/ static void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw) { - struct ixgbe_adapter *adapter = hw->back; u32 gcr = IXGBE_READ_REG(hw, IXGBE_GCR); u16 pcie_devctl2; + if (ixgbe_removed(hw->hw_addr)) + return; + /* only take action if timeout value is defaulted to 0 */ if (gcr & IXGBE_GCR_CMPL_TMOUT_MASK) goto out; @@ -79,11 +82,9 @@ static void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw) * directly in order to set the completion timeout value for * 16ms to 55ms */ - pci_read_config_word(adapter->pdev, - IXGBE_PCI_DEVICE_CONTROL2, &pcie_devctl2); + pcie_devctl2 = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_DEVICE_CONTROL2); pcie_devctl2 |= IXGBE_PCI_DEVICE_CONTROL2_16ms; - pci_write_config_word(adapter->pdev, - IXGBE_PCI_DEVICE_CONTROL2, pcie_devctl2); + ixgbe_write_pci_cfg_word(hw, IXGBE_PCI_DEVICE_CONTROL2, pcie_devctl2); out: /* disable completion timeout resend */ gcr &= ~IXGBE_GCR_CMPL_TMOUT_RESEND; @@ -100,6 +101,7 @@ static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw) mac->mcft_size = IXGBE_82598_MC_TBL_SIZE; mac->vft_size = IXGBE_82598_VFT_TBL_SIZE; mac->num_rar_entries = IXGBE_82598_RAR_ENTRIES; + mac->rx_pb_size = IXGBE_82598_RX_PB_SIZE; mac->max_rx_queues = IXGBE_82598_MAX_RX_QUEUES; mac->max_tx_queues = IXGBE_82598_MAX_TX_QUEUES; mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw); @@ -201,8 +203,6 @@ static s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw) IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval); } - hw->mac.rx_pb_size = IXGBE_82598_RX_PB_SIZE; - /* set the completion timeout for interface */ if (ret_val == 0) ixgbe_set_pcie_completion_timeout(hw); @@ -1237,14 +1237,14 @@ static void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw) } /** - * ixgbe_set_rxpba_82598 - Configure packet buffers + * ixgbe_set_rxpba_82598 - Initialize RX packet buffer * @hw: pointer to hardware structure - * @dcb_config: pointer to ixgbe_dcb_config structure - * - * Configure packet buffers. - */ -static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb, u32 headroom, - int strategy) + * @num_pb: number of packet buffers to allocate + * @headroom: reserve n KB of headroom + * @strategy: packet buffer allocation strategy + **/ +static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb, + u32 headroom, int strategy) { u32 rxpktsize = IXGBE_RXPBSIZE_64KB; u8 i = 0; @@ -1315,7 +1315,8 @@ static struct ixgbe_mac_operations mac_ops_82598 = { .release_swfw_sync = &ixgbe_release_swfw_sync, .get_thermal_sensor_data = NULL, .init_thermal_sensor_thresh = NULL, - .mng_fw_enabled = NULL, + .prot_autoc_read = &prot_autoc_read_generic, + .prot_autoc_write = &prot_autoc_write_generic, }; static struct ixgbe_eeprom_operations eeprom_ops_82598 = { diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c index edda6814108c..f32b3dd1ba8e 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2013 Intel Corporation. + Copyright(c) 1999 - 2014 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -20,6 +20,7 @@ the file called "COPYING". 
Contact Information: + Linux NICS <linux.nics@intel.com> e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 @@ -63,8 +64,10 @@ static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr, u8 *data); static s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr, u8 data); +static s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw); +static bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw); -static bool ixgbe_mng_enabled(struct ixgbe_hw *hw) +bool ixgbe_mng_enabled(struct ixgbe_hw *hw) { u32 fwsm, manc, factps; @@ -91,7 +94,7 @@ static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw) * and MNG not enabled */ if ((mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) && - !hw->mng_fw_enabled) { + !ixgbe_mng_enabled(hw)) { mac->ops.disable_tx_laser = &ixgbe_disable_tx_laser_multispeed_fiber; mac->ops.enable_tx_laser = @@ -122,7 +125,6 @@ static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw) { s32 ret_val = 0; u16 list_offset, data_offset, data_value; - bool got_lock = false; if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) { ixgbe_init_mac_link_ops_82599(hw); @@ -160,30 +162,10 @@ static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw) usleep_range(hw->eeprom.semaphore_delay * 1000, hw->eeprom.semaphore_delay * 2000); - /* Need SW/FW semaphore around AUTOC writes if LESM on, - * likewise reset_pipeline requires lock as it also writes - * AUTOC. - */ - if (ixgbe_verify_lesm_fw_enabled_82599(hw)) { - ret_val = hw->mac.ops.acquire_swfw_sync(hw, - IXGBE_GSSR_MAC_CSR_SM); - if (ret_val) - goto setup_sfp_out; - - got_lock = true; - } - /* Restart DSP and set SFI mode */ - IXGBE_WRITE_REG(hw, IXGBE_AUTOC, ((hw->mac.orig_autoc) | - IXGBE_AUTOC_LMS_10G_SERIAL)); - hw->mac.cached_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); - ret_val = ixgbe_reset_pipeline_82599(hw); - - if (got_lock) { - hw->mac.ops.release_swfw_sync(hw, - IXGBE_GSSR_MAC_CSR_SM); - got_lock = false; - } + ret_val = hw->mac.ops.prot_autoc_write(hw, + hw->mac.orig_autoc | IXGBE_AUTOC_LMS_10G_SERIAL, + false); if (ret_val) { hw_dbg(hw, " sfp module setup not complete\n"); @@ -207,6 +189,81 @@ setup_sfp_err: return IXGBE_ERR_SFP_SETUP_NOT_COMPLETE; } +/** + * prot_autoc_read_82599 - Hides MAC differences needed for AUTOC read + * @hw: pointer to hardware structure + * @locked: Return the if we locked for this read. + * @reg_val: Value we read from AUTOC + * + * For this part (82599) we need to wrap read-modify-writes with a possible + * FW/SW lock. It is assumed this lock will be freed with the next + * prot_autoc_write_82599(). Note, that locked can only be true in cases + * where this function doesn't return an error. + **/ +static s32 prot_autoc_read_82599(struct ixgbe_hw *hw, bool *locked, + u32 *reg_val) +{ + s32 ret_val; + + *locked = false; + /* If LESM is on then we need to hold the SW/FW semaphore. */ + if (ixgbe_verify_lesm_fw_enabled_82599(hw)) { + ret_val = hw->mac.ops.acquire_swfw_sync(hw, + IXGBE_GSSR_MAC_CSR_SM); + if (ret_val) + return IXGBE_ERR_SWFW_SYNC; + + *locked = true; + } + + *reg_val = IXGBE_READ_REG(hw, IXGBE_AUTOC); + return 0; +} + +/** + * prot_autoc_write_82599 - Hides MAC differences needed for AUTOC write + * @hw: pointer to hardware structure + * @reg_val: value to write to AUTOC + * @locked: bool to indicate whether the SW/FW lock was already taken by + * previous proc_autoc_read_82599. 
+ * + * This part (82599) may need to hold the SW/FW lock around all writes to + * AUTOC. Likewise after a write we need to do a pipeline reset. + **/ +static s32 prot_autoc_write_82599(struct ixgbe_hw *hw, u32 autoc, bool locked) +{ + s32 ret_val = 0; + + /* Blocked by MNG FW so bail */ + if (ixgbe_check_reset_blocked(hw)) + goto out; + + /* We only need to get the lock if: + * - We didn't do it already (in the read part of a read-modify-write) + * - LESM is enabled. + */ + if (!locked && ixgbe_verify_lesm_fw_enabled_82599(hw)) { + ret_val = hw->mac.ops.acquire_swfw_sync(hw, + IXGBE_GSSR_MAC_CSR_SM); + if (ret_val) + return IXGBE_ERR_SWFW_SYNC; + + locked = true; + } + + IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc); + ret_val = ixgbe_reset_pipeline_82599(hw); + +out: + /* Free the SW/FW semaphore as we either grabbed it here or + * already had it when this function was called. + */ + if (locked) + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM); + + return ret_val; +} + static s32 ixgbe_get_invariants_82599(struct ixgbe_hw *hw) { struct ixgbe_mac_info *mac = &hw->mac; @@ -216,6 +273,7 @@ static s32 ixgbe_get_invariants_82599(struct ixgbe_hw *hw) mac->mcft_size = IXGBE_82599_MC_TBL_SIZE; mac->vft_size = IXGBE_82599_VFT_TBL_SIZE; mac->num_rar_entries = IXGBE_82599_RAR_ENTRIES; + mac->rx_pb_size = IXGBE_82599_RX_PB_SIZE; mac->max_rx_queues = IXGBE_82599_MAX_RX_QUEUES; mac->max_tx_queues = IXGBE_82599_MAX_TX_QUEUES; mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw); @@ -456,12 +514,20 @@ out: * * Disables link, should be called during D3 power down sequence. * - */ + **/ static void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw) { - u32 autoc2_reg; + u32 autoc2_reg, fwsm; + u16 ee_ctrl_2 = 0; + + hw->eeprom.ops.read(hw, IXGBE_EEPROM_CTRL_2, &ee_ctrl_2); - if (!hw->mng_fw_enabled && !hw->wol_enabled) { + /* Check to see if MNG FW could be enabled */ + fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM); + + if (((fwsm & IXGBE_FWSM_MODE_MASK) != IXGBE_FWSM_FW_MODE_PT) && + !hw->wol_enabled && + ee_ctrl_2 & IXGBE_EEPROM_CCD_BIT) { autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2); autoc2_reg |= IXGBE_AUTOC2_LINK_DISABLE_ON_D3_MASK; IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2_reg); @@ -542,6 +608,10 @@ static void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) { u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); + /* Blocked by MNG FW so bail */ + if (ixgbe_check_reset_blocked(hw)) + return; + /* Disable tx laser; allow 100us to go dark per spec */ esdp_reg |= IXGBE_ESDP_SDP3; IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); @@ -582,6 +652,10 @@ static void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) **/ static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) { + /* Blocked by MNG FW so bail */ + if (ixgbe_check_reset_blocked(hw)) + return; + if (hw->mac.autotry_restart) { ixgbe_disable_tx_laser_multispeed_fiber(hw); ixgbe_enable_tx_laser_multispeed_fiber(hw); @@ -590,75 +664,6 @@ static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) } /** - * ixgbe_set_fiber_fixed_speed - Set module link speed for fixed fiber - * @hw: pointer to hardware structure - * @speed: link speed to set - * - * We set the module speed differently for fixed fiber. For other - * multi-speed devices we don't have an error value so here if we - * detect an error we just log it and exit.
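
The reworked ixgbe_stop_mac_link_on_d3_82599() above replaces a single cached flag with three live checks: manageability firmware must not be in pass-through mode, WoL must be off, and the EEPROM must opt in through its CCD bit. A standalone model of that predicate; the constant values here are stand-ins, the real ones live in ixgbe_type.h:

#include <stdbool.h>
#include <stdint.h>

#define FWSM_MODE_MASK  0xEu      /* stand-in for IXGBE_FWSM_MODE_MASK */
#define FWSM_FW_MODE_PT 0x4u      /* stand-in for IXGBE_FWSM_FW_MODE_PT */
#define EEPROM_CCD_BIT  (1u << 2) /* stand-in for IXGBE_EEPROM_CCD_BIT */

/* Link may be disabled on D3 entry only when all three conditions hold. */
static bool may_disable_link_on_d3(uint32_t fwsm, bool wol_enabled,
                                   uint16_t ee_ctrl_2)
{
	return (fwsm & FWSM_MODE_MASK) != FWSM_FW_MODE_PT &&
	       !wol_enabled &&
	       (ee_ctrl_2 & EEPROM_CCD_BIT);
}
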
- */ -static void ixgbe_set_fiber_fixed_speed(struct ixgbe_hw *hw, - ixgbe_link_speed speed) -{ - s32 status; - u8 rs, eeprom_data; - - switch (speed) { - case IXGBE_LINK_SPEED_10GB_FULL: - /* one bit mask same as setting on */ - rs = IXGBE_SFF_SOFT_RS_SELECT_10G; - break; - case IXGBE_LINK_SPEED_1GB_FULL: - rs = IXGBE_SFF_SOFT_RS_SELECT_1G; - break; - default: - hw_dbg(hw, "Invalid fixed module speed\n"); - return; - } - - /* Set RS0 */ - status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB, - IXGBE_I2C_EEPROM_DEV_ADDR2, - &eeprom_data); - if (status) { - hw_dbg(hw, "Failed to read Rx Rate Select RS0\n"); - goto out; - } - - eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs; - - status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB, - IXGBE_I2C_EEPROM_DEV_ADDR2, - eeprom_data); - if (status) { - hw_dbg(hw, "Failed to write Rx Rate Select RS0\n"); - goto out; - } - - /* Set RS1 */ - status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB, - IXGBE_I2C_EEPROM_DEV_ADDR2, - &eeprom_data); - if (status) { - hw_dbg(hw, "Failed to read Rx Rate Select RS1\n"); - goto out; - } - - eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) & rs; - - status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB, - IXGBE_I2C_EEPROM_DEV_ADDR2, - eeprom_data); - if (status) { - hw_dbg(hw, "Failed to write Rx Rate Select RS1\n"); - goto out; - } -out: - return; -} - -/** * ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed * @hw: pointer to hardware structure * @speed: new link speed @@ -768,10 +773,6 @@ static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, /* Set the module link speed */ switch (hw->phy.media_type) { - case ixgbe_media_type_fiber_fixed: - ixgbe_set_fiber_fixed_speed(hw, - IXGBE_LINK_SPEED_1GB_FULL); - break; case ixgbe_media_type_fiber: esdp_reg &= ~IXGBE_ESDP_SDP5; esdp_reg |= IXGBE_ESDP_SDP5_DIR; @@ -941,8 +942,7 @@ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw, out: if (link_up && (link_speed == IXGBE_LINK_SPEED_1GB_FULL)) - hw_dbg(hw, "Smartspeed has downgraded the link speed from " - "the maximum advertised\n"); + hw_dbg(hw, "Smartspeed has downgraded the link speed from the maximum advertised\n"); return status; } @@ -958,16 +958,19 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, ixgbe_link_speed speed, bool autoneg_wait_to_complete) { + bool autoneg = false; s32 status = 0; - u32 autoc, pma_pmd_1g, link_mode, start_autoc; + u32 pma_pmd_1g, link_mode, links_reg, i; u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2); - u32 orig_autoc = 0; u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK; - u32 links_reg; - u32 i; ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN; - bool got_lock = false; - bool autoneg = false; + + /* holds the value of AUTOC register at this current point in time */ + u32 current_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); + /* holds the cached value of AUTOC register */ + u32 orig_autoc = 0; + /* temporary variable used for comparison purposes */ + u32 autoc = current_autoc; /* Check to see if speed passed in is supported. 
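
The renamed locals above make the read-modify-write explicit: current_autoc is the live register value, orig_autoc the stored EEPROM-default value used for capability checks, and autoc the working copy. As the rest of the hunk shows, the write-back only happens when the working copy diverges from the live value, so an unchanged link setup never pays for the pipeline reset that prot_autoc_write performs. Condensed shape of that flow (illustrative, error handling trimmed):

u32 current_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
u32 autoc = current_autoc;               /* working copy */

/* ... adjust autoc's LMS/KR/KX4/KX bits for the requested speed ... */

if (autoc != current_autoc)
	/* takes the LESM lock and resets the pipeline as needed */
	status = hw->mac.ops.prot_autoc_write(hw, autoc, false);
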
*/ status = hw->mac.ops.get_link_capabilities(hw, &link_capabilities, @@ -984,12 +987,10 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, /* Use stored value (EEPROM defaults) of AUTOC to find KR/KX4 support*/ if (hw->mac.orig_link_settings_stored) - autoc = hw->mac.orig_autoc; + orig_autoc = hw->mac.orig_autoc; else - autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); + orig_autoc = autoc; - orig_autoc = autoc; - start_autoc = hw->mac.cached_autoc; link_mode = autoc & IXGBE_AUTOC_LMS_MASK; pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK; @@ -1029,28 +1030,11 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, } } - if (autoc != start_autoc) { - /* Need SW/FW semaphore around AUTOC writes if LESM is on, - * likewise reset_pipeline requires us to hold this lock as - * it also writes to AUTOC. - */ - if (ixgbe_verify_lesm_fw_enabled_82599(hw)) { - status = hw->mac.ops.acquire_swfw_sync(hw, - IXGBE_GSSR_MAC_CSR_SM); - if (status != 0) - goto out; - - got_lock = true; - } - + if (autoc != current_autoc) { /* Restart link */ - IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc); - hw->mac.cached_autoc = autoc; - ixgbe_reset_pipeline_82599(hw); - - if (got_lock) - hw->mac.ops.release_swfw_sync(hw, - IXGBE_GSSR_MAC_CSR_SM); + status = hw->mac.ops.prot_autoc_write(hw, autoc, false); + if (status) + goto out; /* Only poll for autoneg to complete if specified to do so */ if (autoneg_wait_to_complete) { @@ -1068,8 +1052,7 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) { status = IXGBE_ERR_AUTONEG_NOT_COMPLETE; - hw_dbg(hw, "Autoneg did not " - "complete.\n"); + hw_dbg(hw, "Autoneg did not complete.\n"); } } } @@ -1117,7 +1100,7 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw) { ixgbe_link_speed link_speed; s32 status; - u32 ctrl, i, autoc2; + u32 ctrl, i, autoc, autoc2; u32 curr_lms; bool link_up = false; @@ -1151,11 +1134,7 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw) hw->phy.ops.reset(hw); /* remember AUTOC from before we reset */ - if (hw->mac.cached_autoc) - curr_lms = hw->mac.cached_autoc & IXGBE_AUTOC_LMS_MASK; - else - curr_lms = IXGBE_READ_REG(hw, IXGBE_AUTOC) & - IXGBE_AUTOC_LMS_MASK; + curr_lms = IXGBE_READ_REG(hw, IXGBE_AUTOC) & IXGBE_AUTOC_LMS_MASK; mac_reset_top: /* @@ -1205,7 +1184,7 @@ mac_reset_top: * stored off yet. Otherwise restore the stored original * values since the reset operation sets back to defaults. */ - hw->mac.cached_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); + autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2); /* Enable link if disabled in NVM */ @@ -1216,7 +1195,7 @@ mac_reset_top: } if (hw->mac.orig_link_settings_stored == false) { - hw->mac.orig_autoc = hw->mac.cached_autoc; + hw->mac.orig_autoc = autoc; hw->mac.orig_autoc2 = autoc2; hw->mac.orig_link_settings_stored = true; } else { @@ -1227,34 +1206,18 @@ mac_reset_top: * Likewise if we support WoL we don't want change the * LMS state either. */ - if ((hw->phy.multispeed_fiber && hw->mng_fw_enabled) || + if ((hw->phy.multispeed_fiber && ixgbe_mng_enabled(hw)) || hw->wol_enabled) hw->mac.orig_autoc = (hw->mac.orig_autoc & ~IXGBE_AUTOC_LMS_MASK) | curr_lms; - if (hw->mac.cached_autoc != hw->mac.orig_autoc) { - /* Need SW/FW semaphore around AUTOC writes if LESM is - * on, likewise reset_pipeline requires us to hold - * this lock as it also writes to AUTOC. 
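
Capturing curr_lms before the reset matters because the MAC reset reloads AUTOC from EEPROM defaults; when manageability firmware or WoL is active, the driver grafts the pre-reset link-mode-select field back onto the original settings instead of letting the link mode revert. A standalone model of that bitfield graft; the mask value is a stand-in for IXGBE_AUTOC_LMS_MASK:

#include <stdint.h>

#define AUTOC_LMS_MASK (0x7u << 13) /* stand-in; see ixgbe_type.h */

/* Keep every original AUTOC field except LMS, which is grafted back
 * from the value that was live just before the MAC reset.
 */
static uint32_t preserve_lms(uint32_t orig_autoc, uint32_t curr_lms)
{
	return (orig_autoc & ~AUTOC_LMS_MASK) | curr_lms;
}
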
- */ - bool got_lock = false; - if (ixgbe_verify_lesm_fw_enabled_82599(hw)) { - status = hw->mac.ops.acquire_swfw_sync(hw, - IXGBE_GSSR_MAC_CSR_SM); - if (status) - goto reset_hw_out; - - got_lock = true; - } - - IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc); - hw->mac.cached_autoc = hw->mac.orig_autoc; - ixgbe_reset_pipeline_82599(hw); - - if (got_lock) - hw->mac.ops.release_swfw_sync(hw, - IXGBE_GSSR_MAC_CSR_SM); + if (autoc != hw->mac.orig_autoc) { + status = hw->mac.ops.prot_autoc_write(hw, + hw->mac.orig_autoc, + false); + if (status) + goto reset_hw_out; } if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) != @@ -1634,35 +1597,20 @@ void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input, { u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan; - u32 bucket_hash = 0; + u32 bucket_hash = 0, hi_dword = 0; + int i; /* Apply masks to input data */ - input->dword_stream[0] &= input_mask->dword_stream[0]; - input->dword_stream[1] &= input_mask->dword_stream[1]; - input->dword_stream[2] &= input_mask->dword_stream[2]; - input->dword_stream[3] &= input_mask->dword_stream[3]; - input->dword_stream[4] &= input_mask->dword_stream[4]; - input->dword_stream[5] &= input_mask->dword_stream[5]; - input->dword_stream[6] &= input_mask->dword_stream[6]; - input->dword_stream[7] &= input_mask->dword_stream[7]; - input->dword_stream[8] &= input_mask->dword_stream[8]; - input->dword_stream[9] &= input_mask->dword_stream[9]; - input->dword_stream[10] &= input_mask->dword_stream[10]; + for (i = 0; i <= 10; i++) + input->dword_stream[i] &= input_mask->dword_stream[i]; /* record the flow_vm_vlan bits as they are a key part to the hash */ flow_vm_vlan = ntohl(input->dword_stream[0]); /* generate common hash dword */ - hi_hash_dword = ntohl(input->dword_stream[1] ^ - input->dword_stream[2] ^ - input->dword_stream[3] ^ - input->dword_stream[4] ^ - input->dword_stream[5] ^ - input->dword_stream[6] ^ - input->dword_stream[7] ^ - input->dword_stream[8] ^ - input->dword_stream[9] ^ - input->dword_stream[10]); + for (i = 1; i <= 10; i++) + hi_dword ^= input->dword_stream[i]; + hi_hash_dword = ntohl(hi_dword); /* low dword is word swapped version of common */ lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16); @@ -1681,21 +1629,8 @@ void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input, lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16); /* Process remaining 30 bit of the key */ - IXGBE_COMPUTE_BKT_HASH_ITERATION(1); - IXGBE_COMPUTE_BKT_HASH_ITERATION(2); - IXGBE_COMPUTE_BKT_HASH_ITERATION(3); - IXGBE_COMPUTE_BKT_HASH_ITERATION(4); - IXGBE_COMPUTE_BKT_HASH_ITERATION(5); - IXGBE_COMPUTE_BKT_HASH_ITERATION(6); - IXGBE_COMPUTE_BKT_HASH_ITERATION(7); - IXGBE_COMPUTE_BKT_HASH_ITERATION(8); - IXGBE_COMPUTE_BKT_HASH_ITERATION(9); - IXGBE_COMPUTE_BKT_HASH_ITERATION(10); - IXGBE_COMPUTE_BKT_HASH_ITERATION(11); - IXGBE_COMPUTE_BKT_HASH_ITERATION(12); - IXGBE_COMPUTE_BKT_HASH_ITERATION(13); - IXGBE_COMPUTE_BKT_HASH_ITERATION(14); - IXGBE_COMPUTE_BKT_HASH_ITERATION(15); + for (i = 1; i <= 15; i++) + IXGBE_COMPUTE_BKT_HASH_ITERATION(i); /* * Limit hash to 13 bits since max bucket count is 8K. @@ -2001,7 +1936,6 @@ static s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw) /* We need to run link autotry after the driver loads */ hw->mac.autotry_restart = true; - hw->mac.rx_pb_size = IXGBE_82599_RX_PB_SIZE; if (ret_val == 0) ret_val = ixgbe_verify_fw_version_82599(hw); @@ -2260,7 +2194,7 @@ fw_version_err: * Returns true if the LESM FW module is present and enabled. Otherwise * returns false. 
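
The two unrolled blocks above collapse into loops without changing the result: the eleven dword masks apply index by index, and because byte-swapping distributes over XOR, the common hash dword can be folded first and swapped once. A standalone equivalence sketch:

#include <arpa/inet.h>
#include <stdint.h>

/* Folding first and calling ntohl() once is equivalent to the removed
 * ntohl(d1 ^ d2 ^ ... ^ d10), since byte swaps distribute over XOR.
 */
static uint32_t common_hash_dword(const uint32_t dword_stream[11])
{
	uint32_t hi_dword = 0;
	int i;

	for (i = 1; i <= 10; i++)
		hi_dword ^= dword_stream[i];
	return ntohl(hi_dword);
}
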
Smart Speed must be disabled if LESM FW module is enabled. **/ -bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw) +static bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw) { bool lesm_enabled = false; u16 fw_offset, fw_lesm_param_offset, fw_lesm_state; @@ -2366,7 +2300,7 @@ static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw, * full pipeline reset. Note - We must hold the SW/FW semaphore before writing * to AUTOC, so this function assumes the semaphore is held. **/ -s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw) +static s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw) { s32 ret_val; u32 anlp1_reg = 0; @@ -2380,11 +2314,12 @@ s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw) IXGBE_WRITE_FLUSH(hw); } - autoc_reg = hw->mac.cached_autoc; + autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); autoc_reg |= IXGBE_AUTOC_AN_RESTART; /* Write AUTOC register with toggled LMS[2] bit and Restart_AN */ - IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg ^ IXGBE_AUTOC_LMS_1G_AN); + IXGBE_WRITE_REG(hw, IXGBE_AUTOC, + autoc_reg ^ (0x4 << IXGBE_AUTOC_LMS_SHIFT)); /* Wait for AN to leave state 0 */ for (i = 0; i < 10; i++) { @@ -2565,7 +2500,8 @@ static struct ixgbe_mac_operations mac_ops_82599 = { .release_swfw_sync = &ixgbe_release_swfw_sync, .get_thermal_sensor_data = &ixgbe_get_thermal_sensor_data_generic, .init_thermal_sensor_thresh = &ixgbe_init_thermal_sensor_thresh_generic, - .mng_fw_enabled = &ixgbe_mng_enabled, + .prot_autoc_read = &prot_autoc_read_82599, + .prot_autoc_write = &prot_autoc_write_82599, }; static struct ixgbe_eeprom_operations eeprom_ops_82599 = { diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c index b5c434b617b1..24fba39e194e 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2013 Intel Corporation. + Copyright(c) 1999 - 2014 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -20,6 +20,7 @@ the file called "COPYING". Contact Information: + Linux NICS <linux.nics@intel.com> e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 @@ -72,7 +73,6 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw) bool link_up; switch (hw->phy.media_type) { - case ixgbe_media_type_fiber_fixed: case ixgbe_media_type_fiber: hw->mac.ops.check_link(hw, &speed, &link_up, false); /* if link is down, assume supported */ @@ -114,7 +114,7 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw) s32 ret_val = 0; u32 reg = 0, reg_bp = 0; u16 reg_cu = 0; - bool got_lock = false; + bool locked = false; /* * Validate the requested mode. Strict IEEE mode does not allow @@ -139,11 +139,16 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw) * we link at 10G, the 1G advertisement is harmless and vice versa. 
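
The open-coded toggle in ixgbe_reset_pipeline_82599() above now matches its own comment: 0x4 shifted to the field position flips LMS[2], the top bit of the three-bit link-mode-select field, rather than relying on a named LMS encoding that happened to overlap it; the driver then restores the original AUTOC once autonegotiation leaves state 0. Hypothetical standalone form, with the shift value a stand-in for IXGBE_AUTOC_LMS_SHIFT:

#include <stdint.h>

#define AUTOC_LMS_SHIFT 13 /* stand-in; field occupies bits 15:13 */

/* Flip only bit 2 of the 3-bit LMS field; writing this value and then
 * the original walks the link state machine through a reset.
 */
static uint32_t toggle_lms2(uint32_t autoc_reg)
{
	return autoc_reg ^ (0x4u << AUTOC_LMS_SHIFT);
}
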
*/ switch (hw->phy.media_type) { - case ixgbe_media_type_fiber_fixed: - case ixgbe_media_type_fiber: case ixgbe_media_type_backplane: + /* some MAC's need RMW protection on AUTOC */ + ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &reg_bp); + if (ret_val) + goto out; + + /* only backplane uses autoc so fall through */ + case ixgbe_media_type_fiber: reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA); - reg_bp = IXGBE_READ_REG(hw, IXGBE_AUTOC); + break; case ixgbe_media_type_copper: hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE, @@ -240,27 +245,12 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw) * LESM is on, likewise reset_pipeline requires the lock as * it also writes AUTOC. */ - if ((hw->mac.type == ixgbe_mac_82599EB) && - ixgbe_verify_lesm_fw_enabled_82599(hw)) { - ret_val = hw->mac.ops.acquire_swfw_sync(hw, - IXGBE_GSSR_MAC_CSR_SM); - if (ret_val) - goto out; - - got_lock = true; - } - - IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_bp); - - if (hw->mac.type == ixgbe_mac_82599EB) - ixgbe_reset_pipeline_82599(hw); - - if (got_lock) - hw->mac.ops.release_swfw_sync(hw, - IXGBE_GSSR_MAC_CSR_SM); + ret_val = hw->mac.ops.prot_autoc_write(hw, reg_bp, locked); + if (ret_val) + goto out; } else if ((hw->phy.media_type == ixgbe_media_type_copper) && - ixgbe_device_supports_autoneg_fc(hw)) { + ixgbe_device_supports_autoneg_fc(hw)) { hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE, MDIO_MMD_AN, reg_cu); } @@ -656,20 +646,17 @@ enum ixgbe_bus_speed ixgbe_convert_bus_speed(u16 link_status) **/ s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw) { - struct ixgbe_adapter *adapter = hw->back; - struct ixgbe_mac_info *mac = &hw->mac; u16 link_status; hw->bus.type = ixgbe_bus_type_pci_express; /* Get the negotiated link width and speed from PCI config space */ - pci_read_config_word(adapter->pdev, IXGBE_PCI_LINK_STATUS, - &link_status); + link_status = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_LINK_STATUS); hw->bus.width = ixgbe_convert_bus_width(link_status); hw->bus.speed = ixgbe_convert_bus_speed(link_status); - mac->ops.set_lan_id(hw); + hw->mac.ops.set_lan_id(hw); return 0; } @@ -2406,7 +2393,6 @@ void ixgbe_fc_autoneg(struct ixgbe_hw *hw) switch (hw->phy.media_type) { /* Autoneg flow control on fiber adapters */ - case ixgbe_media_type_fiber_fixed: case ixgbe_media_type_fiber: if (speed == IXGBE_LINK_SPEED_1GB_FULL) ret_val = ixgbe_fc_autoneg_fiber(hw); @@ -2437,6 +2423,53 @@ out: } /** + * ixgbe_pcie_timeout_poll - Return number of times to poll for completion + * @hw: pointer to hardware structure + * + * System-wide timeout range is encoded in PCIe Device Control2 register. + * + * Add 10% to specified maximum and return the number of times to poll for + * completion timeout, in units of 100 microsec. Never return less than + * 800 = 80 millisec.
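
Since the poll count is in units of 100 microseconds, the constants in the function below read directly as time budgets, and the final adjustment adds 10% headroom: the 800-count floor becomes 880 polls (88 ms) and the 1-2 s encoding becomes 22000 polls (2.2 s). A quick standalone arithmetic check:

#include <stdint.h>
#include <stdio.h>

/* pollcnt is in units of 100 microseconds; add 10% headroom. */
static uint32_t with_headroom(uint32_t pollcnt)
{
	return pollcnt * 11 / 10;
}

int main(void)
{
	/* 800 -> 880 polls (88 ms floor); 20000 -> 22000 polls (2.2 s) */
	printf("%u %u\n", with_headroom(800), with_headroom(20000));
	return 0;
}
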
+ **/ +static u32 ixgbe_pcie_timeout_poll(struct ixgbe_hw *hw) +{ + s16 devctl2; + u32 pollcnt; + + devctl2 = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_DEVICE_CONTROL2); + devctl2 &= IXGBE_PCIDEVCTRL2_TIMEO_MASK; + + switch (devctl2) { + case IXGBE_PCIDEVCTRL2_65_130ms: + pollcnt = 1300; /* 130 millisec */ + break; + case IXGBE_PCIDEVCTRL2_260_520ms: + pollcnt = 5200; /* 520 millisec */ + break; + case IXGBE_PCIDEVCTRL2_1_2s: + pollcnt = 20000; /* 2 sec */ + break; + case IXGBE_PCIDEVCTRL2_4_8s: + pollcnt = 80000; /* 8 sec */ + break; + case IXGBE_PCIDEVCTRL2_17_34s: + pollcnt = 34000; /* 34 sec */ + break; + case IXGBE_PCIDEVCTRL2_50_100us: /* 100 microsecs */ + case IXGBE_PCIDEVCTRL2_1_2ms: /* 2 millisecs */ + case IXGBE_PCIDEVCTRL2_16_32ms: /* 32 millisec */ + case IXGBE_PCIDEVCTRL2_16_32ms_def: /* 32 millisec default */ + default: + pollcnt = 800; /* 80 millisec minimum */ + break; + } + + /* add 10% to spec maximum */ + return (pollcnt * 11) / 10; +} + +/** * ixgbe_disable_pcie_master - Disable PCI-express master access * @hw: pointer to hardware structure * @@ -2447,16 +2480,16 @@ out: **/ static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw) { - struct ixgbe_adapter *adapter = hw->back; s32 status = 0; - u32 i; + u32 i, poll; u16 value; /* Always set this bit to ensure any future transactions are blocked */ IXGBE_WRITE_REG(hw, IXGBE_CTRL, IXGBE_CTRL_GIO_DIS); /* Exit if master requests are blocked */ - if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO)) + if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO) || + ixgbe_removed(hw->hw_addr)) goto out; /* Poll for master request bit to clear */ @@ -2481,10 +2514,12 @@ static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw) * Before proceeding, make sure that the PCIe block does not have * transactions pending. */ - for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) { + poll = ixgbe_pcie_timeout_poll(hw); + for (i = 0; i < poll; i++) { udelay(100); - pci_read_config_word(adapter->pdev, IXGBE_PCI_DEVICE_STATUS, - &value); + value = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_DEVICE_STATUS); + if (ixgbe_removed(hw->hw_addr)) + goto out; if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING)) goto out; } @@ -2564,6 +2599,35 @@ void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask) } /** + * prot_autoc_read_generic - Hides MAC differences needed for AUTOC read + * @hw: pointer to hardware structure + * @reg_val: Value we read from AUTOC + * @locked: bool to indicate whether the SW/FW lock should be taken. Never + * true in the generic case. + * + * The default case requires no protection so just do the register read. + **/ +s32 prot_autoc_read_generic(struct ixgbe_hw *hw, bool *locked, u32 *reg_val) +{ + *locked = false; + *reg_val = IXGBE_READ_REG(hw, IXGBE_AUTOC); + return 0; +} + +/** + * prot_autoc_write_generic - Hides MAC differences needed for AUTOC write + * @hw: pointer to hardware structure + * @reg_val: value to write to AUTOC + * @locked: bool to indicate whether the SW/FW lock was already taken by + * previous read.
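
The master-disable path above combines that poll budget with surprise-removal checks: it polls the PCI device status word until no transactions are pending, and gives up immediately if a config read shows the adapter is gone. A condensed standalone model of the loop; the helpers and the status bit are hypothetical stand-ins for ixgbe_read_pci_cfg_word(), ixgbe_removed() and the real register layout:

#include <stdbool.h>
#include <stdint.h>

#define TRANSACTION_PENDING 0x20u /* stand-in for the status-register bit */

/* Hypothetical callbacks modeling the driver's config-space read,
 * removal check and udelay(100).
 */
extern uint16_t read_device_status(void);
extern bool device_removed(void);
extern void wait_100us(void);

/* Returns true once the PCIe block reports no pending transactions. */
static bool wait_for_quiescence(uint32_t poll)
{
	uint32_t i;

	for (i = 0; i < poll; i++) {
		wait_100us();
		if (device_removed())
			return false; /* surprise removal: stop polling */
		if (!(read_device_status() & TRANSACTION_PENDING))
			return true;
	}
	return false; /* budget exhausted with transactions pending */
}
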
+ **/ +s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked) +{ + IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_val); + return 0; +} + +/** * ixgbe_disable_rx_buff_generic - Stops the receive data path * @hw: pointer to hardware structure * @@ -2641,6 +2705,7 @@ s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index) u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); s32 ret_val = 0; + bool locked = false; /* * Link must be up to auto-blink the LEDs; @@ -2649,28 +2714,19 @@ s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index) hw->mac.ops.check_link(hw, &speed, &link_up, false); if (!link_up) { - /* Need the SW/FW semaphore around AUTOC writes if 82599 and - * LESM is on. - */ - bool got_lock = false; - - if ((hw->mac.type == ixgbe_mac_82599EB) && - ixgbe_verify_lesm_fw_enabled_82599(hw)) { - ret_val = hw->mac.ops.acquire_swfw_sync(hw, - IXGBE_GSSR_MAC_CSR_SM); - if (ret_val) - goto out; + ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg); + if (ret_val) + goto out; - got_lock = true; - } autoc_reg |= IXGBE_AUTOC_AN_RESTART; autoc_reg |= IXGBE_AUTOC_FLU; - IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); + + ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked); + if (ret_val) + goto out; + IXGBE_WRITE_FLUSH(hw); - if (got_lock) - hw->mac.ops.release_swfw_sync(hw, - IXGBE_GSSR_MAC_CSR_SM); usleep_range(10000, 20000); } @@ -2690,33 +2746,21 @@ out: **/ s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index) { - u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); + u32 autoc_reg = 0; u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); s32 ret_val = 0; - bool got_lock = false; + bool locked = false; - /* Need the SW/FW semaphore around AUTOC writes if 82599 and - * LESM is on. 
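
Auto-blinking an LED needs link, so when the port is down ixgbe_blink_led_start_generic() above forces it: FLU (force link up) plus an AN restart, applied through the protected accessors so that any LESM locking and pipeline reset happen in one place; blink_led_stop undoes it the same way. Condensed from the hunks, with error paths trimmed:

bool locked = false;
u32 autoc_reg;

/* read may take the SW/FW semaphore on MACs that need it */
ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg);

autoc_reg |= IXGBE_AUTOC_AN_RESTART | IXGBE_AUTOC_FLU; /* force link up */

/* write resets the pipeline and drops the semaphore again */
ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked);
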
- */ - if ((hw->mac.type == ixgbe_mac_82599EB) && - ixgbe_verify_lesm_fw_enabled_82599(hw)) { - ret_val = hw->mac.ops.acquire_swfw_sync(hw, - IXGBE_GSSR_MAC_CSR_SM); - if (ret_val) - goto out; - - got_lock = true; - } + ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg); + if (ret_val) + goto out; autoc_reg &= ~IXGBE_AUTOC_FLU; autoc_reg |= IXGBE_AUTOC_AN_RESTART; - IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); - - if (hw->mac.type == ixgbe_mac_82599EB) - ixgbe_reset_pipeline_82599(hw); - if (got_lock) - hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM); + ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked); + if (ret_val) + goto out; led_reg &= ~IXGBE_LED_MODE_MASK(index); led_reg &= ~IXGBE_LED_BLINK(index); @@ -2817,7 +2861,6 @@ san_mac_addr_clr: **/ u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw) { - struct ixgbe_adapter *adapter = hw->back; u16 msix_count = 1; u16 max_msix_count; u16 pcie_offset; @@ -2836,7 +2879,9 @@ u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw) return msix_count; } - pci_read_config_word(adapter->pdev, pcie_offset, &msix_count); + msix_count = ixgbe_read_pci_cfg_word(hw, pcie_offset); + if (ixgbe_removed(hw->hw_addr)) + msix_count = 0; msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK; /* MSI-X count is zero-based in HW */ @@ -2868,6 +2913,9 @@ s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq) mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar)); mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar)); + if (ixgbe_removed(hw->hw_addr)) + goto done; + if (!mpsar_lo && !mpsar_hi) goto done; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h index f2e3919750ec..f12c40fb5537 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2013 Intel Corporation. + Copyright(c) 1999 - 2014 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -20,6 +20,7 @@ the file called "COPYING". Contact Information: + Linux NICS <linux.nics@intel.com> e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 @@ -98,6 +99,10 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, bool *link_up, bool link_up_wait_to_complete); s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix, u16 *wwpn_prefix); + +s32 prot_autoc_read_generic(struct ixgbe_hw *hw, bool *, u32 *reg_val); +s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked); + s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index); s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index); void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf); @@ -106,10 +111,10 @@ s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps); s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min, u8 build, u8 ver); void ixgbe_clear_tx_pending(struct ixgbe_hw *hw); +bool ixgbe_mng_enabled(struct ixgbe_hw *hw); void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, u32 headroom, int strategy); -s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw); #define IXGBE_I2C_THERMAL_SENSOR_ADDR 0xF8 #define IXGBE_EMC_INTERNAL_DATA 0x00 @@ -125,6 +130,11 @@ s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw); s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw); #define IXGBE_FAILED_READ_REG 0xffffffffU +#define IXGBE_FAILED_READ_CFG_DWORD 0xffffffffU +#define IXGBE_FAILED_READ_CFG_WORD 0xffffU + +u16 ixgbe_read_pci_cfg_word(struct ixgbe_hw *hw, u32 reg); +void ixgbe_write_pci_cfg_word(struct ixgbe_hw *hw, u32 reg, u16 value); static inline bool ixgbe_removed(void __iomem *addr) { diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c index 05e23b80b5e3..bdb99b3b0f30 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c @@ -20,6 +20,7 @@ the file called "COPYING". Contact Information: + Linux NICS <linux.nics@intel.com> e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h index d71d9ce3e394..d5a1e3db0774 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h @@ -20,6 +20,7 @@ the file called "COPYING". Contact Information: + Linux NICS <linux.nics@intel.com> e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c index c5933f6dceee..472b0f450bf9 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c @@ -20,6 +20,7 @@ the file called "COPYING". Contact Information: + Linux NICS <linux.nics@intel.com> e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index 043307024c4a..6c55c14d082a 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2013 Intel Corporation. + Copyright(c) 1999 - 2014 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -20,6 +20,7 @@ the file called "COPYING". Contact Information: + Linux NICS <linux.nics@intel.com> e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 @@ -1127,10 +1128,10 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev, } do { - start = u64_stats_fetch_begin_bh(&ring->syncp); + start = u64_stats_fetch_begin_irq(&ring->syncp); data[i] = ring->stats.packets; data[i+1] = ring->stats.bytes; - } while (u64_stats_fetch_retry_bh(&ring->syncp, start)); + } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); i += 2; #ifdef BP_EXTENDED_STATS data[i] = ring->stats.yields; @@ -1155,10 +1156,10 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev, } do { - start = u64_stats_fetch_begin_bh(&ring->syncp); + start = u64_stats_fetch_begin_irq(&ring->syncp); data[i] = ring->stats.packets; data[i+1] = ring->stats.bytes; - } while (u64_stats_fetch_retry_bh(&ring->syncp, start)); + } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); i += 2; #ifdef BP_EXTENDED_STATS data[i] = ring->stats.yields; @@ -1247,6 +1248,11 @@ static int ixgbe_link_test(struct ixgbe_adapter *adapter, u64 *data) struct ixgbe_hw *hw = &adapter->hw; bool link_up; u32 link_speed = 0; + + if (ixgbe_removed(hw->hw_addr)) { + *data = 1; + return 1; + } *data = 0; hw->mac.ops.check_link(hw, &link_speed, &link_up, true); @@ -1969,6 +1975,7 @@ static void ixgbe_diag_test(struct net_device *netdev, data[1] = 1; data[2] = 1; data[3] = 1; + data[4] = 1; eth_test->flags |= ETH_TEST_FL_FAILED; return; } @@ -1988,6 +1995,7 @@ static void ixgbe_diag_test(struct net_device *netdev, data[1] = 1; data[2] = 1; data[3] = 1; + data[4] = 1; eth_test->flags |= ETH_TEST_FL_FAILED; clear_bit(__IXGBE_TESTING, &adapter->state); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c index f58db453a97e..25a3dfef33e8 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c @@ -20,6 +20,7 @@ the file called "COPYING". Contact Information: + Linux NICS <linux.nics@intel.com> e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 @@ -407,13 +408,13 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter, switch (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_FCSTAT)) { /* return 0 to bypass going to ULD for DDPed data */ - case __constant_cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_DDP): + case cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_DDP): /* update length of DDPed data */ ddp->len = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss); rc = 0; break; /* unmap the sg list when FCPRSP is received */ - case __constant_cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_FCPRSP): + case cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_FCPRSP): dma_unmap_sg(&adapter->pdev->dev, ddp->sgl, ddp->sgc, DMA_FROM_DEVICE); ddp->err = ddp_err; @@ -421,14 +422,14 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter, ddp->sgc = 0; /* fall through */ /* if DDP length is present pass it through to ULD */ - case __constant_cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_NODDP): + case cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_NODDP): /* update length of DDPed data */ ddp->len = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss); if (ddp->len) rc = ddp->len; break; /* no match will return as an error */ - case __constant_cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_NOMTCH): + case cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_NOMTCH): default: break; } @@ -585,7 +586,7 @@ static int ixgbe_fcoe_dma_pool_alloc(struct ixgbe_fcoe *fcoe, struct dma_pool *pool; char pool_name[32]; - snprintf(pool_name, 32, "ixgbe_fcoe_ddp_%d", cpu); + snprintf(pool_name, 32, "ixgbe_fcoe_ddp_%u", cpu); pool = dma_pool_create(pool_name, dev, IXGBE_FCPTR_MAX, IXGBE_FCPTR_ALIGN, PAGE_SIZE); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h index 3a02759b5e95..b16cc786750d 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h @@ -20,6 +20,7 @@ the file called "COPYING". Contact Information: + Linux NICS <linux.nics@intel.com> e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c index 32e3eaaa160a..2067d392cc3d 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c @@ -20,6 +20,7 @@ the file called "COPYING". Contact Information: + Linux NICS <linux.nics@intel.com> e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 @@ -698,7 +699,7 @@ static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter) static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter, int vectors) { - int err, vector_threshold; + int vector_threshold; /* We'll want at least 2 (vector_threshold): * 1) TxQ[0] + RxQ[0] handler @@ -712,18 +713,10 @@ static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter, * Right now, we simply care about how many we'll get; we'll * set them up later while requesting irq's. */ - while (vectors >= vector_threshold) { - err = pci_enable_msix(adapter->pdev, adapter->msix_entries, - vectors); - if (!err) /* Success in acquiring all requested vectors. 
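
pci_enable_msix_range() absorbs the retry dance that the removed loop above implemented by hand: it requests up to 'vectors' entries, lets the PCI core step the request down on partial failures, and returns either the granted count (at least vector_threshold) or a negative errno. That leaves one call and one sign check, sketched here:

/* Illustrative: replaces the open-coded pci_enable_msix() retry loop */
vectors = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
				vector_threshold, vectors);
if (vectors < 0) {
	/* could not meet the minimum; fall back to MSI or legacy INTx */
} else {
	/* 'vectors' is the number of MSI-X vectors actually allocated */
}
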
*/ - break; - else if (err < 0) - vectors = 0; /* Nasty failure, quit now */ - else /* err == number of vectors we should try again with */ - vectors = err; - } + vectors = pci_enable_msix_range(adapter->pdev, adapter->msix_entries, + vector_threshold, vectors); - if (vectors < vector_threshold) { + if (vectors < 0) { /* Can't allocate enough MSI-X interrupts? Oh well. * This just means we'll go with either a single MSI * vector or fall back to legacy interrupts. diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 18076c4178b4..8436c651b735 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2013 Intel Corporation. + Copyright(c) 1999 - 2014 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -20,6 +20,7 @@ the file called "COPYING". Contact Information: + Linux NICS <linux.nics@intel.com> e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 @@ -67,7 +68,7 @@ static char ixgbe_default_device_descr[] = #define DRV_VERSION "3.19.1-k" const char ixgbe_driver_version[] = DRV_VERSION; static const char ixgbe_copyright[] = - "Copyright (c) 1999-2013 Intel Corporation."; + "Copyright (c) 1999-2014 Intel Corporation."; static const struct ixgbe_info *ixgbe_info_tbl[] = { [board_82598] = &ixgbe_82598_info, @@ -151,6 +152,8 @@ MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_VERSION); +static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev); + static int ixgbe_read_pci_cfg_word_parent(struct ixgbe_adapter *adapter, u32 reg, u16 *value) { @@ -169,6 +172,9 @@ static int ixgbe_read_pci_cfg_word_parent(struct ixgbe_adapter *adapter, return -1; pcie_capability_read_word(parent_dev, reg, value); + if (*value == IXGBE_FAILED_READ_CFG_WORD && + ixgbe_check_cfg_remove(&adapter->hw, parent_dev)) + return -1; return 0; } @@ -313,6 +319,57 @@ void ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg) ixgbe_remove_adapter(hw); } +static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev) +{ + u16 value; + + pci_read_config_word(pdev, PCI_VENDOR_ID, &value); + if (value == IXGBE_FAILED_READ_CFG_WORD) { + ixgbe_remove_adapter(hw); + return true; + } + return false; +} + +u16 ixgbe_read_pci_cfg_word(struct ixgbe_hw *hw, u32 reg) +{ + struct ixgbe_adapter *adapter = hw->back; + u16 value; + + if (ixgbe_removed(hw->hw_addr)) + return IXGBE_FAILED_READ_CFG_WORD; + pci_read_config_word(adapter->pdev, reg, &value); + if (value == IXGBE_FAILED_READ_CFG_WORD && + ixgbe_check_cfg_remove(hw, adapter->pdev)) + return IXGBE_FAILED_READ_CFG_WORD; + return value; +} + +#ifdef CONFIG_PCI_IOV +static u32 ixgbe_read_pci_cfg_dword(struct ixgbe_hw *hw, u32 reg) +{ + struct ixgbe_adapter *adapter = hw->back; + u32 value; + + if (ixgbe_removed(hw->hw_addr)) + return IXGBE_FAILED_READ_CFG_DWORD; + pci_read_config_dword(adapter->pdev, reg, &value); + if (value == IXGBE_FAILED_READ_CFG_DWORD && + ixgbe_check_cfg_remove(hw, adapter->pdev)) + return IXGBE_FAILED_READ_CFG_DWORD; + return value; +} +#endif /* CONFIG_PCI_IOV */ + +void ixgbe_write_pci_cfg_word(struct ixgbe_hw *hw, 
u32 reg, u16 value) +{ + struct ixgbe_adapter *adapter = hw->back; + + if (ixgbe_removed(hw->hw_addr)) + return; + pci_write_config_word(adapter->pdev, reg, value); +} + static void ixgbe_service_event_complete(struct ixgbe_adapter *adapter) { BUG_ON(!test_bit(__IXGBE_SERVICE_SCHED, &adapter->state)); @@ -1264,7 +1321,9 @@ static inline void ixgbe_rx_hash(struct ixgbe_ring *ring, struct sk_buff *skb) { if (ring->netdev->features & NETIF_F_RXHASH) - skb->rxhash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss); + skb_set_hash(skb, + le32_to_cpu(rx_desc->wb.lower.hi_dword.rss), + PKT_HASH_TYPE_L3); } #ifdef IXGBE_FCOE @@ -1480,7 +1539,7 @@ static unsigned int ixgbe_get_headlen(unsigned char *data, hdr.network += ETH_HLEN; /* handle any vlan tag if present */ - if (protocol == __constant_htons(ETH_P_8021Q)) { + if (protocol == htons(ETH_P_8021Q)) { if ((hdr.network - data) > (max_len - VLAN_HLEN)) return max_len; @@ -1489,7 +1548,7 @@ static unsigned int ixgbe_get_headlen(unsigned char *data, } /* handle L3 protocols */ - if (protocol == __constant_htons(ETH_P_IP)) { + if (protocol == htons(ETH_P_IP)) { if ((hdr.network - data) > (max_len - sizeof(struct iphdr))) return max_len; @@ -1503,7 +1562,7 @@ static unsigned int ixgbe_get_headlen(unsigned char *data, /* record next protocol if header is present */ if (!(hdr.ipv4->frag_off & htons(IP_OFFSET))) nexthdr = hdr.ipv4->protocol; - } else if (protocol == __constant_htons(ETH_P_IPV6)) { + } else if (protocol == htons(ETH_P_IPV6)) { if ((hdr.network - data) > (max_len - sizeof(struct ipv6hdr))) return max_len; @@ -1511,7 +1570,7 @@ static unsigned int ixgbe_get_headlen(unsigned char *data, nexthdr = hdr.ipv6->nexthdr; hlen = sizeof(struct ipv6hdr); #ifdef IXGBE_FCOE - } else if (protocol == __constant_htons(ETH_P_FCOE)) { + } else if (protocol == htons(ETH_P_FCOE)) { if ((hdr.network - data) > (max_len - FCOE_HEADER_LEN)) return max_len; hlen = FCOE_HEADER_LEN; @@ -2026,7 +2085,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, #endif /* IXGBE_FCOE */ u16 cleaned_count = ixgbe_desc_unused(rx_ring); - do { + while (likely(total_rx_packets < budget)) { union ixgbe_adv_rx_desc *rx_desc; struct sk_buff *skb; @@ -2101,7 +2160,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, /* update budget accounting */ total_rx_packets++; - } while (likely(total_rx_packets < budget)); + } u64_stats_update_begin(&rx_ring->syncp); rx_ring->stats.packets += total_rx_packets; @@ -2630,9 +2689,12 @@ static irqreturn_t ixgbe_msix_other(int irq, void *data) switch (hw->mac.type) { case ixgbe_mac_82599EB: case ixgbe_mac_X540: - if (eicr & IXGBE_EICR_ECC) - e_info(link, "Received unrecoverable ECC Err, please " - "reboot\n"); + if (eicr & IXGBE_EICR_ECC) { + e_info(link, "Received ECC Err, initiating reset\n"); + adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED; + ixgbe_service_event_schedule(adapter); + IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC); + } /* Handle Flow Director Full threshold interrupt */ if (eicr & IXGBE_EICR_FLOW_DIR) { int reinit_count = 0; @@ -2846,9 +2908,12 @@ static irqreturn_t ixgbe_intr(int irq, void *data) ixgbe_check_sfp_event(adapter, eicr); /* Fall through */ case ixgbe_mac_X540: - if (eicr & IXGBE_EICR_ECC) - e_info(link, "Received unrecoverable ECC err, please " - "reboot\n"); + if (eicr & IXGBE_EICR_ECC) { + e_info(link, "Received ECC Err, initiating reset\n"); + adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED; + ixgbe_service_event_schedule(adapter); + IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC); + } 
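
The loop conversion in ixgbe_clean_rx_irq() above changes one edge case: the old do/while processed at least one descriptor even with a zero budget, while the new while form processes none, which is what budget-0 callers that want no RX work done expect. A standalone illustration of the difference:

#include <stdio.h>

/* Models the two loop shapes; 'done' counts packets handled. */
static int rx_packets_handled(int budget, int use_while)
{
	int done = 0;

	if (use_while) {
		while (done < budget) /* new form: checks budget first */
			done++;
	} else {
		do {                  /* old form: runs at least once */
			done++;
		} while (done < budget);
	}
	return done;
}

int main(void)
{
	/* budget 0: old form handles 1 packet, new form handles 0 */
	printf("budget 0: old=%d new=%d\n",
	       rx_packets_handled(0, 0), rx_packets_handled(0, 1));
	return 0;
}
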
ixgbe_check_overtemp_event(adapter, eicr); break; default: @@ -4590,8 +4655,6 @@ static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter) static void ixgbe_up_complete(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; - struct net_device *upper; - struct list_head *iter; int err; u32 ctrl_ext; @@ -4633,19 +4696,6 @@ static void ixgbe_up_complete(struct ixgbe_adapter *adapter) e_crit(drv, "Fan has stopped, replace the adapter\n"); } - /* enable transmits */ - netif_tx_start_all_queues(adapter->netdev); - - /* enable any upper devices */ - netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) { - if (netif_is_macvlan(upper)) { - struct macvlan_dev *vlan = netdev_priv(upper); - - if (vlan->fwd_priv) - netif_tx_start_all_queues(upper); - } - } - /* bring the link up in the watchdog, this could race with our first * link up interrupt but shouldn't be a problem */ adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; @@ -5502,6 +5552,7 @@ static int ixgbe_resume(struct pci_dev *pdev) struct net_device *netdev = adapter->netdev; u32 err; + adapter->hw.hw_addr = adapter->io_addr; pci_set_power_state(pdev, PCI_D0); pci_restore_state(pdev); /* @@ -5515,6 +5566,8 @@ static int ixgbe_resume(struct pci_dev *pdev) e_dev_err("Cannot enable PCI device from suspend\n"); return err; } + smp_mb__before_clear_bit(); + clear_bit(__IXGBE_DISABLED, &adapter->state); pci_set_master(pdev); pci_wake_from_d3(pdev, false); @@ -5612,7 +5665,8 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake) ixgbe_release_hw_control(adapter); - pci_disable_device(pdev); + if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state)) + pci_disable_device(pdev); return 0; } @@ -6016,6 +6070,8 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter) { struct net_device *netdev = adapter->netdev; struct ixgbe_hw *hw = &adapter->hw; + struct net_device *upper; + struct list_head *iter; u32 link_speed = adapter->link_speed; bool flow_rx, flow_tx; @@ -6067,6 +6123,21 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter) netif_carrier_on(netdev); ixgbe_check_vf_rate_limit(adapter); + /* enable transmits */ + netif_tx_wake_all_queues(adapter->netdev); + + /* enable any upper devices */ + rtnl_lock(); + netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) { + if (netif_is_macvlan(upper)) { + struct macvlan_dev *vlan = netdev_priv(upper); + + if (vlan->fwd_priv) + netif_tx_wake_all_queues(upper); + } + } + rtnl_unlock(); + /* update the default user priority for VFs */ ixgbe_update_default_up(adapter); @@ -6454,7 +6525,7 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring, /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP; - if (first->protocol == __constant_htons(ETH_P_IP)) { + if (first->protocol == htons(ETH_P_IP)) { struct iphdr *iph = ip_hdr(skb); iph->tot_len = 0; iph->check = 0; @@ -6514,12 +6585,12 @@ static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring, } else { u8 l4_hdr = 0; switch (first->protocol) { - case __constant_htons(ETH_P_IP): + case htons(ETH_P_IP): vlan_macip_lens |= skb_network_header_len(skb); type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4; l4_hdr = ip_hdr(skb)->protocol; break; - case __constant_htons(ETH_P_IPV6): + case htons(ETH_P_IPV6): vlan_macip_lens |= skb_network_header_len(skb); l4_hdr = ipv6_hdr(skb)->nexthdr; break; @@ -6794,9 +6865,9 @@ static void ixgbe_atr(struct ixgbe_ring *ring, hdr.network = skb_network_header(first->skb); /* Currently only IPv4/IPv6 with TCP is supported */ - if ((first->protocol != 
__constant_htons(ETH_P_IPV6) || + if ((first->protocol != htons(ETH_P_IPV6) || hdr.ipv6->nexthdr != IPPROTO_TCP) && - (first->protocol != __constant_htons(ETH_P_IP) || + (first->protocol != htons(ETH_P_IP) || hdr.ipv4->protocol != IPPROTO_TCP)) return; @@ -6829,12 +6900,12 @@ static void ixgbe_atr(struct ixgbe_ring *ring, * and write the value to source port portion of compressed dword */ if (first->tx_flags & (IXGBE_TX_FLAGS_SW_VLAN | IXGBE_TX_FLAGS_HW_VLAN)) - common.port.src ^= th->dest ^ __constant_htons(ETH_P_8021Q); + common.port.src ^= th->dest ^ htons(ETH_P_8021Q); else common.port.src ^= th->dest ^ first->protocol; common.port.dst ^= th->source; - if (first->protocol == __constant_htons(ETH_P_IP)) { + if (first->protocol == htons(ETH_P_IP)) { input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4; common.ip ^= hdr.ipv4->saddr ^ hdr.ipv4->daddr; } else { @@ -6900,8 +6971,8 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb, * or FIP and we have FCoE enabled on the adapter */ switch (vlan_get_protocol(skb)) { - case __constant_htons(ETH_P_FCOE): - case __constant_htons(ETH_P_FIP): + case htons(ETH_P_FCOE): + case htons(ETH_P_FIP): adapter = netdev_priv(dev); if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) @@ -6962,7 +7033,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, tx_flags |= vlan_tx_tag_get(skb) << IXGBE_TX_FLAGS_VLAN_SHIFT; tx_flags |= IXGBE_TX_FLAGS_HW_VLAN; /* else if it is a SW VLAN check the next protocol and store the tag */ - } else if (protocol == __constant_htons(ETH_P_8021Q)) { + } else if (protocol == htons(ETH_P_8021Q)) { struct vlan_hdr *vhdr, _vhdr; vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr); if (!vhdr) @@ -6974,9 +7045,9 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, tx_flags |= IXGBE_TX_FLAGS_SW_VLAN; } - skb_tx_timestamp(skb); - - if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { + if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP && + !test_and_set_bit_lock(__IXGBE_PTP_TX_IN_PROGRESS, + &adapter->state))) { skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; tx_flags |= IXGBE_TX_FLAGS_TSTAMP; @@ -6986,6 +7057,8 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, schedule_work(&adapter->ptp_tx_work); } + skb_tx_timestamp(skb); + #ifdef CONFIG_PCI_IOV /* * Use the l2switch_enable flag - would be false if the DMA @@ -7021,7 +7094,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, #ifdef IXGBE_FCOE /* setup tx offload for FCoE */ - if ((protocol == __constant_htons(ETH_P_FCOE)) && + if ((protocol == htons(ETH_P_FCOE)) && (tx_ring->netdev->features & (NETIF_F_FSO | NETIF_F_FCOE_CRC))) { tso = ixgbe_fso(tx_ring, first, &hdr_len); if (tso < 0) @@ -7143,7 +7216,9 @@ static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd) switch (cmd) { case SIOCSHWTSTAMP: - return ixgbe_ptp_hwtstamp_ioctl(adapter, req, cmd); + return ixgbe_ptp_set_ts_config(adapter, req); + case SIOCGHWTSTAMP: + return ixgbe_ptp_get_ts_config(adapter, req); default: return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd); } @@ -7234,10 +7309,10 @@ static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev, if (ring) { do { - start = u64_stats_fetch_begin_bh(&ring->syncp); + start = u64_stats_fetch_begin_irq(&ring->syncp); packets = ring->stats.packets; bytes = ring->stats.bytes; - } while (u64_stats_fetch_retry_bh(&ring->syncp, start)); + } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); stats->rx_packets += packets; stats->rx_bytes += bytes; } @@ 
-7250,10 +7325,10 @@ static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev, if (ring) { do { - start = u64_stats_fetch_begin_bh(&ring->syncp); + start = u64_stats_fetch_begin_irq(&ring->syncp); packets = ring->stats.packets; bytes = ring->stats.bytes; - } while (u64_stats_fetch_retry_bh(&ring->syncp, start)); + } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); stats->tx_packets += packets; stats->tx_bytes += bytes; } @@ -7792,6 +7867,7 @@ int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id, case IXGBE_DEV_ID_82599_SFP: /* Only these subdevices could supports WOL */ switch (subdevice_id) { + case IXGBE_SUBDEV_ID_82599_SFP_WOL0: case IXGBE_SUBDEV_ID_82599_560FLR: /* only support first port */ if (hw->bus.func != 0) @@ -7969,10 +8045,6 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (err) goto err_sw_init; - /* Cache if MNG FW is up so we don't have to read the REG later */ - if (hw->mac.ops.mng_fw_enabled) - hw->mng_fw_enabled = hw->mac.ops.mng_fw_enabled(hw); - /* Make it possible the adapter to be woken up via WOL */ switch (adapter->hw.mac.type) { case ixgbe_mac_82599EB: @@ -8223,7 +8295,7 @@ skip_sriov: ixgbe_dbg_adapter_init(adapter); /* Need link setup for MNG FW, else wait for IXGBE_UP */ - if (hw->mng_fw_enabled && hw->mac.ops.setup_link) + if (ixgbe_mng_enabled(hw) && hw->mac.ops.setup_link) hw->mac.ops.setup_link(hw, IXGBE_LINK_SPEED_10GB_FULL | IXGBE_LINK_SPEED_1GB_FULL, true); @@ -8244,7 +8316,8 @@ err_alloc_etherdev: pci_select_bars(pdev, IORESOURCE_MEM)); err_pci_reg: err_dma: - pci_disable_device(pdev); + if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state)) + pci_disable_device(pdev); return err; } @@ -8313,7 +8386,8 @@ static void ixgbe_remove(struct pci_dev *pdev) pci_disable_pcie_error_reporting(pdev); - pci_disable_device(pdev); + if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state)) + pci_disable_device(pdev); } /** @@ -8331,6 +8405,7 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev, struct net_device *netdev = adapter->netdev; #ifdef CONFIG_PCI_IOV + struct ixgbe_hw *hw = &adapter->hw; struct pci_dev *bdev, *vfdev; u32 dw0, dw1, dw2, dw3; int vf, pos; @@ -8351,10 +8426,12 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev, if (!pos) goto skip_bad_vf_detection; - pci_read_config_dword(bdev, pos + PCI_ERR_HEADER_LOG, &dw0); - pci_read_config_dword(bdev, pos + PCI_ERR_HEADER_LOG + 4, &dw1); - pci_read_config_dword(bdev, pos + PCI_ERR_HEADER_LOG + 8, &dw2); - pci_read_config_dword(bdev, pos + PCI_ERR_HEADER_LOG + 12, &dw3); + dw0 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG); + dw1 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 4); + dw2 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 8); + dw3 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 12); + if (ixgbe_removed(hw->hw_addr)) + goto skip_bad_vf_detection; req_id = dw1 >> 16; /* On the 82599 if bit 7 of the requestor ID is set then it's a VF */ @@ -8417,14 +8494,20 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev, skip_bad_vf_detection: #endif /* CONFIG_PCI_IOV */ + rtnl_lock(); netif_device_detach(netdev); - if (state == pci_channel_io_perm_failure) + if (state == pci_channel_io_perm_failure) { + rtnl_unlock(); return PCI_ERS_RESULT_DISCONNECT; + } if (netif_running(netdev)) ixgbe_down(adapter); - pci_disable_device(pdev); + + if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state)) + pci_disable_device(pdev); + 
rtnl_unlock(); /* Request a slot reset. */ return PCI_ERS_RESULT_NEED_RESET; @@ -8446,6 +8529,9 @@ static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev) e_err(probe, "Cannot re-enable PCI device after reset.\n"); result = PCI_ERS_RESULT_DISCONNECT; } else { + smp_mb__before_clear_bit(); + clear_bit(__IXGBE_DISABLED, &adapter->state); + adapter->hw.hw_addr = adapter->io_addr; pci_set_master(pdev); pci_restore_state(pdev); pci_save_state(pdev); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c index cc3101afd29f..f5c6af2b891b 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c @@ -20,6 +20,7 @@ the file called "COPYING". Contact Information: + Linux NICS <linux.nics@intel.com> e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h index e44ff47659b5..a9b9ad69ed0e 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h @@ -20,6 +20,7 @@ the file called "COPYING". Contact Information: + Linux NICS <linux.nics@intel.com> e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c index 132557c318f8..23f765263f12 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2013 Intel Corporation. + Copyright(c) 1999 - 2014 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -20,6 +20,7 @@ the file called "COPYING". Contact Information: + Linux NICS <linux.nics@intel.com> e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 @@ -98,6 +99,32 @@ s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw) } /** + * ixgbe_check_reset_blocked - check status of MNG FW veto bit + * @hw: pointer to the hardware structure + * + * This function checks the MMNGC.MNG_VETO bit to see if there are + * any constraints on link from manageability. For MAC's that don't + * have this bit just return false since the link can not be blocked + * via this method. + **/ +bool ixgbe_check_reset_blocked(struct ixgbe_hw *hw) +{ + u32 mmngc; + + /* If we don't have this bit, it can't be blocking */ + if (hw->mac.type == ixgbe_mac_82598EB) + return false; + + mmngc = IXGBE_READ_REG(hw, IXGBE_MMNGC); + if (mmngc & IXGBE_MMNGC_MNG_VETO) { + hw_dbg(hw, "MNG_VETO bit detected.\n"); + return true; + } + + return false; +} + +/** * ixgbe_get_phy_id - Get the phy type * @hw: pointer to hardware structure * @@ -172,6 +199,10 @@ s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw) (IXGBE_ERR_OVERTEMP == hw->phy.ops.check_overtemp(hw))) goto out; + /* Blocked by MNG FW so bail */ + if (ixgbe_check_reset_blocked(hw)) + goto out; + /* * Perform soft PHY reset to the PHY_XS. 
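
ixgbe_check_reset_blocked() gives every reset-capable path a single cheap veto check: 82598 has no MMNGC register and can never be blocked, while everything newer honors the MNG_VETO bit so a PHY or laser reset is skipped while manageability firmware owns the link. The guard pattern, in a hypothetical condensed helper:

/* Hypothetical shape of the resets guarded in this patch. */
static s32 example_guarded_reset(struct ixgbe_hw *hw)
{
	/* Blocked by MNG FW so bail, reporting success: the link is
	 * deliberately left untouched for the firmware's benefit.
	 */
	if (ixgbe_check_reset_blocked(hw))
		return 0;

	/* ... proceed with the soft PHY reset via MDIO_CTRL1 ... */
	return 0;
}
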
* This will cause a soft reset to the PHY @@ -476,6 +507,10 @@ s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw) autoneg_reg); } + /* Blocked by MNG FW so don't reset PHY */ + if (ixgbe_check_reset_blocked(hw)) + return status; + /* Restart PHY autonegotiation and wait for completion */ hw->phy.ops.read_reg(hw, MDIO_CTRL1, MDIO_MMD_AN, &autoneg_reg); @@ -682,6 +717,10 @@ s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw) autoneg_reg); } + /* Blocked by MNG FW so don't reset PHY */ + if (ixgbe_check_reset_blocked(hw)) + return status; + /* Restart PHY autonegotiation and wait for completion */ hw->phy.ops.read_reg(hw, MDIO_CTRL1, MDIO_MMD_AN, &autoneg_reg); @@ -759,6 +798,10 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw) s32 ret_val = 0; u32 i; + /* Blocked by MNG FW so bail */ + if (ixgbe_check_reset_blocked(hw)) + goto out; + hw->phy.ops.read_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS, &phy_data); /* reset the PHY and poll for completion */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h index fffcbdd2bf0e..0bb047f751c2 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2013 Intel Corporation. + Copyright(c) 1999 - 2014 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -20,6 +20,7 @@ the file called "COPYING". Contact Information: + Linux NICS <linux.nics@intel.com> e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 @@ -65,9 +66,6 @@ #define IXGBE_SFF_1GBASET_CAPABLE 0x8 #define IXGBE_SFF_10GBASESR_CAPABLE 0x10 #define IXGBE_SFF_10GBASELR_CAPABLE 0x20 -#define IXGBE_SFF_SOFT_RS_SELECT_MASK 0x8 -#define IXGBE_SFF_SOFT_RS_SELECT_10G 0x8 -#define IXGBE_SFF_SOFT_RS_SELECT_1G 0x0 #define IXGBE_SFF_ADDRESSING_MODE 0x4 #define IXGBE_SFF_QSFP_DA_ACTIVE_CABLE 0x1 #define IXGBE_SFF_QSFP_DA_PASSIVE_CABLE 0x8 @@ -79,7 +77,6 @@ #define IXGBE_I2C_EEPROM_STATUS_PASS 0x1 #define IXGBE_I2C_EEPROM_STATUS_FAIL 0x2 #define IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS 0x3 - /* Flow control defines */ #define IXGBE_TAF_SYM_PAUSE 0x400 #define IXGBE_TAF_ASM_PAUSE 0x800 @@ -131,6 +128,7 @@ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw, s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed, bool *autoneg); +bool ixgbe_check_reset_blocked(struct ixgbe_hw *hw); /* PHY specific */ s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c index 5184e2a1a7d8..63515a6f67fa 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c @@ -20,6 +20,7 @@ the file called "COPYING". Contact Information: + Linux NICS <linux.nics@intel.com> e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> Intel Corporation, 5200 N.E. 
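Each of the hunks above gates a PHY reset or autoneg restart on the new ixgbe_check_reset_blocked() helper. A hedged sketch of the calling pattern, assuming the driver's internal headers; example_reset_phy() is illustrative, not a function from the patch:

/* Skip any PHY reset while manageability firmware holds MNG_VETO;
 * resetting the PHY underneath the BMC would drop its link.
 */
static s32 example_reset_phy(struct ixgbe_hw *hw)
{
	if (ixgbe_check_reset_blocked(hw))
		return 0;	/* firmware owns the link; leave the PHY alone */

	return hw->phy.ops.reset(hw);
}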
Elam Young Parkway, Hillsboro, OR 97124-6497 @@ -492,6 +493,7 @@ static void ixgbe_ptp_tx_hwtstamp(struct ixgbe_adapter *adapter) dev_kfree_skb_any(adapter->ptp_tx_skb); adapter->ptp_tx_skb = NULL; + clear_bit_unlock(__IXGBE_PTP_TX_IN_PROGRESS, &adapter->state); } /** @@ -511,13 +513,10 @@ static void ixgbe_ptp_tx_hwtstamp_work(struct work_struct *work) IXGBE_PTP_TX_TIMEOUT); u32 tsynctxctl; - /* we have to have a valid skb */ - if (!adapter->ptp_tx_skb) - return; - if (timeout) { dev_kfree_skb_any(adapter->ptp_tx_skb); adapter->ptp_tx_skb = NULL; + clear_bit_unlock(__IXGBE_PTP_TX_IN_PROGRESS, &adapter->state); e_warn(drv, "clearing Tx Timestamp hang"); return; } @@ -576,14 +575,21 @@ void __ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector, shhwtstamps->hwtstamp = ns_to_ktime(ns); } +int ixgbe_ptp_get_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr) +{ + struct hwtstamp_config *config = &adapter->tstamp_config; + + return copy_to_user(ifr->ifr_data, config, + sizeof(*config)) ? -EFAULT : 0; +} + /** - * ixgbe_ptp_hwtstamp_ioctl - control hardware time stamping + * ixgbe_ptp_set_ts_config - control hardware time stamping * @adapter: pointer to adapter struct * @ifreq: ioctl data - * @cmd: particular ioctl requested * * Outgoing time stamping can be enabled and disabled. Play nice and - * disable it when requested, although it shouldn't case any overhead + * disable it when requested, although it shouldn't cause any overhead * when no packet needs it. At most one packet in the queue may be * marked for time stamping, otherwise it would be impossible to tell * for sure to which packet the hardware time stamp belongs. @@ -599,8 +605,7 @@ void __ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector, * Event mode. This more accurately tells the user what the hardware is going * to do anyways. */ -int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter, - struct ifreq *ifr, int cmd) +int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr) { struct ixgbe_hw *hw = &adapter->hw; struct hwtstamp_config config; @@ -702,6 +707,10 @@ int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter, regval = IXGBE_READ_REG(hw, IXGBE_TXSTMPH); regval = IXGBE_READ_REG(hw, IXGBE_RXSTMPH); + /* save these settings for future reference */ + memcpy(&adapter->tstamp_config, &config, + sizeof(adapter->tstamp_config)); + return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? 
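The clear_bit_unlock() calls being added here release the __IXGBE_PTP_TX_IN_PROGRESS state bit once the pending timestamp skb is disposed of. The acquire side is not part of these hunks; the sketch below shows how such a lock bit is typically paired, with illustrative function names and the timeout bookkeeping omitted:

/* Acquire: only one TX timestamp may be outstanding at a time. */
static bool example_ptp_tx_begin(struct ixgbe_adapter *adapter,
				 struct sk_buff *skb)
{
	if (test_and_set_bit_lock(__IXGBE_PTP_TX_IN_PROGRESS,
				  &adapter->state))
		return false;		/* a timestamp is already in flight */

	adapter->ptp_tx_skb = skb_get(skb);
	return true;
}

/* Release: pairs with the test_and_set_bit_lock() above. */
static void example_ptp_tx_end(struct ixgbe_adapter *adapter)
{
	dev_kfree_skb_any(adapter->ptp_tx_skb);
	adapter->ptp_tx_skb = NULL;
	clear_bit_unlock(__IXGBE_PTP_TX_IN_PROGRESS, &adapter->state);
}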
-EFAULT : 0; } @@ -809,6 +818,9 @@ void ixgbe_ptp_reset(struct ixgbe_adapter *adapter) IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0x00000000); IXGBE_WRITE_FLUSH(hw); + /* Reset the saved tstamp_config */ + memset(&adapter->tstamp_config, 0, sizeof(adapter->tstamp_config)); + ixgbe_ptp_start_cyclecounter(adapter); spin_lock_irqsave(&adapter->tmreg_lock, flags); @@ -840,7 +852,9 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter) switch (adapter->hw.mac.type) { case ixgbe_mac_X540: - snprintf(adapter->ptp_caps.name, 16, "%s", netdev->name); + snprintf(adapter->ptp_caps.name, + sizeof(adapter->ptp_caps.name), + "%s", netdev->name); adapter->ptp_caps.owner = THIS_MODULE; adapter->ptp_caps.max_adj = 250000000; adapter->ptp_caps.n_alarm = 0; @@ -854,7 +868,9 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter) adapter->ptp_caps.enable = ixgbe_ptp_enable; break; case ixgbe_mac_82599EB: - snprintf(adapter->ptp_caps.name, 16, "%s", netdev->name); + snprintf(adapter->ptp_caps.name, + sizeof(adapter->ptp_caps.name), + "%s", netdev->name); adapter->ptp_caps.owner = THIS_MODULE; adapter->ptp_caps.max_adj = 250000000; adapter->ptp_caps.n_alarm = 0; @@ -911,6 +927,7 @@ void ixgbe_ptp_stop(struct ixgbe_adapter *adapter) if (adapter->ptp_tx_skb) { dev_kfree_skb_any(adapter->ptp_tx_skb); adapter->ptp_tx_skb = NULL; + clear_bit_unlock(__IXGBE_PTP_TX_IN_PROGRESS, &adapter->state); } if (adapter->ptp_clock) { diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index dff0977876f7..e6c68d396c99 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c @@ -20,6 +20,7 @@ the file called "COPYING". Contact Information: + Linux NICS <linux.nics@intel.com> e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h index 8bd29190514e..139eaddfb2ed 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h @@ -20,6 +20,7 @@ the file called "COPYING". Contact Information: + Linux NICS <linux.nics@intel.com> e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c index e74ae3682733..ef6df3d6437e 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c @@ -20,6 +20,7 @@ the file called "COPYING". Contact Information: + Linux NICS <linux.nics@intel.com> e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h index 0d39cfc4a3bf..8a6ff2423f07 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2013 Intel Corporation. + Copyright(c) 1999 - 2014 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -20,6 +20,7 @@ the file called "COPYING". 
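The snprintf() changes above swap a hard-coded 16 for sizeof() on the destination, the usual defensive idiom; a tiny kernel-context sketch with an illustrative struct:

struct example {
	char name[16];
};

static void example_set_name(struct example *ex, const char *src)
{
	/* sizeof(ex->name) tracks the field if it is ever resized;
	 * the literal 16 it replaces would silently go stale.
	 */
	snprintf(ex->name, sizeof(ex->name), "%s", src);
}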
Contact Information: + Linux NICS <linux.nics@intel.com> e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 @@ -54,6 +55,7 @@ #define IXGBE_DEV_ID_82599_BACKPLANE_FCOE 0x152a #define IXGBE_DEV_ID_82599_SFP_FCOE 0x1529 #define IXGBE_SUBDEV_ID_82599_SFP 0x11A9 +#define IXGBE_SUBDEV_ID_82599_SFP_WOL0 0x1071 #define IXGBE_SUBDEV_ID_82599_RNDC 0x1F72 #define IXGBE_SUBDEV_ID_82599_560FLR 0x17D0 #define IXGBE_SUBDEV_ID_82599_SP_560FLR 0x211B @@ -1609,6 +1611,9 @@ enum { #define IXGBE_MACC_FS 0x00040000 #define IXGBE_MAC_RX2TX_LPBK 0x00000002 +/* Veto Bit definition */ +#define IXGBE_MMNGC_MNG_VETO 0x00000001 + /* LINKS Bit Masks */ #define IXGBE_LINKS_KX_AN_COMP 0x80000000 #define IXGBE_LINKS_UP 0x40000000 @@ -1788,6 +1793,9 @@ enum { #define IXGBE_EEPROM_RD_BUFFER_MAX_COUNT 512 /* EEPROM words # read in burst */ #define IXGBE_EEPROM_WR_BUFFER_MAX_COUNT 256 /* EEPROM words # wr in burst */ +#define IXGBE_EEPROM_CTRL_2 1 /* EEPROM CTRL word 2 */ +#define IXGBE_EEPROM_CCD_BIT 2 /* EEPROM Core Clock Disable bit */ + #ifndef IXGBE_EEPROM_GRANT_ATTEMPTS #define IXGBE_EEPROM_GRANT_ATTEMPTS 1000 /* EEPROM # attempts to gain grant */ #endif @@ -1853,8 +1861,19 @@ enum { #define IXGBE_PCI_HEADER_TYPE_MULTIFUNC 0x80 #define IXGBE_PCI_DEVICE_CONTROL2_16ms 0x0005 +#define IXGBE_PCIDEVCTRL2_TIMEO_MASK 0xf +#define IXGBE_PCIDEVCTRL2_16_32ms_def 0x0 +#define IXGBE_PCIDEVCTRL2_50_100us 0x1 +#define IXGBE_PCIDEVCTRL2_1_2ms 0x2 +#define IXGBE_PCIDEVCTRL2_16_32ms 0x5 +#define IXGBE_PCIDEVCTRL2_65_130ms 0x6 +#define IXGBE_PCIDEVCTRL2_260_520ms 0x9 +#define IXGBE_PCIDEVCTRL2_1_2s 0xa +#define IXGBE_PCIDEVCTRL2_4_8s 0xd +#define IXGBE_PCIDEVCTRL2_17_34s 0xe + /* Number of 100 microseconds we wait for PCI Express master disable */ -#define IXGBE_PCI_MASTER_DISABLE_TIMEOUT 800 +#define IXGBE_PCI_MASTER_DISABLE_TIMEOUT 800 /* RAH */ #define IXGBE_RAH_VIND_MASK 0x003C0000 @@ -2645,7 +2664,6 @@ enum ixgbe_sfp_type { enum ixgbe_media_type { ixgbe_media_type_unknown = 0, ixgbe_media_type_fiber, - ixgbe_media_type_fiber_fixed, ixgbe_media_type_fiber_qsfp, ixgbe_media_type_fiber_lco, ixgbe_media_type_copper, @@ -2858,6 +2876,8 @@ struct ixgbe_mac_operations { s32 (*enable_rx_dma)(struct ixgbe_hw *, u32); s32 (*acquire_swfw_sync)(struct ixgbe_hw *, u16); void (*release_swfw_sync)(struct ixgbe_hw *, u16); + s32 (*prot_autoc_read)(struct ixgbe_hw *, bool *, u32 *); + s32 (*prot_autoc_write)(struct ixgbe_hw *, u32, bool); /* Link */ void (*disable_tx_laser)(struct ixgbe_hw *); @@ -2901,7 +2921,6 @@ struct ixgbe_mac_operations { s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8); s32 (*get_thermal_sensor_data)(struct ixgbe_hw *); s32 (*init_thermal_sensor_thresh)(struct ixgbe_hw *hw); - bool (*mng_fw_enabled)(struct ixgbe_hw *hw); }; struct ixgbe_phy_operations { @@ -2957,7 +2976,6 @@ struct ixgbe_mac_info { u32 max_tx_queues; u32 max_rx_queues; u32 orig_autoc; - u32 cached_autoc; u32 orig_autoc2; bool orig_link_settings_stored; bool autotry_restart; @@ -3033,7 +3051,6 @@ struct ixgbe_hw { bool adapter_stopped; bool force_full_reset; bool allow_unsupported_sfp; - bool mng_fw_enabled; bool wol_enabled; }; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c index 24b80a6cfca4..188a5974b85c 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c @@ -20,6 +20,7 @@ the file called "COPYING".
Contact Information: + Linux NICS <linux.nics@intel.com> e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 @@ -61,6 +62,7 @@ static s32 ixgbe_get_invariants_X540(struct ixgbe_hw *hw) mac->mcft_size = IXGBE_X540_MC_TBL_SIZE; mac->vft_size = IXGBE_X540_VFT_TBL_SIZE; mac->num_rar_entries = IXGBE_X540_RAR_ENTRIES; + mac->rx_pb_size = IXGBE_X540_RX_PB_SIZE; mac->max_rx_queues = IXGBE_X540_MAX_RX_QUEUES; mac->max_tx_queues = IXGBE_X540_MAX_TX_QUEUES; mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw); @@ -187,7 +189,6 @@ static s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw) goto out; ret_val = ixgbe_start_hw_gen2(hw); - hw->mac.rx_pb_size = IXGBE_X540_RX_PB_SIZE; out: return ret_val; } @@ -854,7 +855,8 @@ static struct ixgbe_mac_operations mac_ops_X540 = { .enable_rx_buff = &ixgbe_enable_rx_buff_generic, .get_thermal_sensor_data = NULL, .init_thermal_sensor_thresh = NULL, - .mng_fw_enabled = NULL, + .prot_autoc_read = &prot_autoc_read_generic, + .prot_autoc_write = &prot_autoc_write_generic, }; static struct ixgbe_eeprom_operations eeprom_ops_X540 = { diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c index f68b78c732a8..1baecb60f065 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c +++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 82599 Virtual Function driver - Copyright(c) 1999 - 2012 Intel Corporation. + Copyright(c) 1999 - 2014 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -530,41 +530,55 @@ static const u32 register_test_patterns[] = { 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF }; -#define REG_PATTERN_TEST(R, M, W) \ -{ \ - u32 pat, val, before; \ - for (pat = 0; pat < ARRAY_SIZE(register_test_patterns); pat++) { \ - before = readl(adapter->hw.hw_addr + R); \ - writel((register_test_patterns[pat] & W), \ - (adapter->hw.hw_addr + R)); \ - val = readl(adapter->hw.hw_addr + R); \ - if (val != (register_test_patterns[pat] & W & M)) { \ - hw_dbg(&adapter->hw, \ - "pattern test reg %04X failed: got " \ - "0x%08X expected 0x%08X\n", \ - R, val, (register_test_patterns[pat] & W & M)); \ - *data = R; \ - writel(before, adapter->hw.hw_addr + R); \ - return 1; \ - } \ - writel(before, adapter->hw.hw_addr + R); \ - } \ +static bool reg_pattern_test(struct ixgbevf_adapter *adapter, u64 *data, + int reg, u32 mask, u32 write) +{ + u32 pat, val, before; + + if (IXGBE_REMOVED(adapter->hw.hw_addr)) { + *data = 1; + return true; + } + for (pat = 0; pat < ARRAY_SIZE(register_test_patterns); pat++) { + before = ixgbevf_read_reg(&adapter->hw, reg); + ixgbe_write_reg(&adapter->hw, reg, + register_test_patterns[pat] & write); + val = ixgbevf_read_reg(&adapter->hw, reg); + if (val != (register_test_patterns[pat] & write & mask)) { + hw_dbg(&adapter->hw, + "pattern test reg %04X failed: got 0x%08X expected 0x%08X\n", + reg, val, + register_test_patterns[pat] & write & mask); + *data = reg; + ixgbe_write_reg(&adapter->hw, reg, before); + return true; + } + ixgbe_write_reg(&adapter->hw, reg, before); + } + return false; } -#define REG_SET_AND_CHECK(R, M, W) \ -{ \ - u32 val, before; \ - before = readl(adapter->hw.hw_addr + R); \ - writel((W & M), (adapter->hw.hw_addr + R)); \ - val = readl(adapter->hw.hw_addr + R); \ - if ((W & M) != (val & M)) { 
\ - pr_err("set/check reg %04X test failed: got 0x%08X expected " \ - "0x%08X\n", R, (val & M), (W & M)); \ - *data = R; \ - writel(before, (adapter->hw.hw_addr + R)); \ - return 1; \ - } \ - writel(before, (adapter->hw.hw_addr + R)); \ +static bool reg_set_and_check(struct ixgbevf_adapter *adapter, u64 *data, + int reg, u32 mask, u32 write) +{ + u32 val, before; + + if (IXGBE_REMOVED(adapter->hw.hw_addr)) { + *data = 1; + return true; + } + before = ixgbevf_read_reg(&adapter->hw, reg); + ixgbe_write_reg(&adapter->hw, reg, write & mask); + val = ixgbevf_read_reg(&adapter->hw, reg); + if ((write & mask) != (val & mask)) { + pr_err("set/check reg %04X test failed: got 0x%08X expected 0x%08X\n", + reg, (val & mask), write & mask); + *data = reg; + ixgbe_write_reg(&adapter->hw, reg, before); + return true; + } + ixgbe_write_reg(&adapter->hw, reg, before); + return false; } static int ixgbevf_reg_test(struct ixgbevf_adapter *adapter, u64 *data) @@ -572,6 +586,12 @@ static int ixgbevf_reg_test(struct ixgbevf_adapter *adapter, u64 *data) const struct ixgbevf_reg_test *test; u32 i; + if (IXGBE_REMOVED(adapter->hw.hw_addr)) { + dev_err(&adapter->pdev->dev, + "Adapter removed - register test blocked\n"); + *data = 1; + return 1; + } test = reg_test_vf; /* @@ -580,38 +600,47 @@ static int ixgbevf_reg_test(struct ixgbevf_adapter *adapter, u64 *data) */ while (test->reg) { for (i = 0; i < test->array_len; i++) { + bool b = false; + switch (test->test_type) { case PATTERN_TEST: - REG_PATTERN_TEST(test->reg + (i * 0x40), - test->mask, - test->write); + b = reg_pattern_test(adapter, data, + test->reg + (i * 0x40), + test->mask, + test->write); break; case SET_READ_TEST: - REG_SET_AND_CHECK(test->reg + (i * 0x40), - test->mask, - test->write); + b = reg_set_and_check(adapter, data, + test->reg + (i * 0x40), + test->mask, + test->write); break; case WRITE_NO_TEST: - writel(test->write, - (adapter->hw.hw_addr + test->reg) - + (i * 0x40)); + ixgbe_write_reg(&adapter->hw, + test->reg + (i * 0x40), + test->write); break; case TABLE32_TEST: - REG_PATTERN_TEST(test->reg + (i * 4), - test->mask, - test->write); + b = reg_pattern_test(adapter, data, + test->reg + (i * 4), + test->mask, + test->write); break; case TABLE64_TEST_LO: - REG_PATTERN_TEST(test->reg + (i * 8), - test->mask, - test->write); + b = reg_pattern_test(adapter, data, + test->reg + (i * 8), + test->mask, + test->write); break; case TABLE64_TEST_HI: - REG_PATTERN_TEST((test->reg + 4) + (i * 8), - test->mask, - test->write); + b = reg_pattern_test(adapter, data, + test->reg + 4 + (i * 8), + test->mask, + test->write); break; } + if (b) + return 1; } test++; } @@ -626,6 +655,14 @@ static void ixgbevf_diag_test(struct net_device *netdev, struct ixgbevf_adapter *adapter = netdev_priv(netdev); bool if_running = netif_running(netdev); + if (IXGBE_REMOVED(adapter->hw.hw_addr)) { + dev_err(&adapter->pdev->dev, + "Adapter removed - test blocked\n"); + data[0] = 1; + data[1] = 1; + eth_test->flags |= ETH_TEST_FL_FAILED; + return; + } set_bit(__IXGBEVF_TESTING, &adapter->state); if (eth_test->flags == ETH_TEST_FL_OFFLINE) { /* Offline tests */ diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h index 54829326bb09..e7e7d695816b 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel 82599 Virtual Function driver - Copyright(c) 1999 - 2012 Intel 
Corporation. + Copyright(c) 1999 - 2014 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -315,6 +315,11 @@ static inline u16 ixgbevf_desc_unused(struct ixgbevf_ring *ring) return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1; } +static inline void ixgbevf_write_tail(struct ixgbevf_ring *ring, u32 value) +{ + writel(value, ring->tail); +} + #define IXGBEVF_RX_DESC(R, i) \ (&(((union ixgbe_adv_rx_desc *)((R)->desc))[i])) #define IXGBEVF_TX_DESC(R, i) \ @@ -401,6 +406,7 @@ struct ixgbevf_adapter { u64 bp_tx_missed; #endif + u8 __iomem *io_addr; /* Mainly for iounmap use */ u32 link_speed; bool link_up; @@ -412,7 +418,9 @@ struct ixgbevf_adapter { enum ixbgevf_state_t { __IXGBEVF_TESTING, __IXGBEVF_RESETTING, - __IXGBEVF_DOWN + __IXGBEVF_DOWN, + __IXGBEVF_DISABLED, + __IXGBEVF_REMOVING, }; struct ixgbevf_cb { diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 9df28985eba7..4ba139b2d25a 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 82599 Virtual Function driver - Copyright(c) 1999 - 2012 Intel Corporation. + Copyright(c) 1999 - 2014 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -99,6 +99,49 @@ static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter); static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector); static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter); +static void ixgbevf_remove_adapter(struct ixgbe_hw *hw) +{ + struct ixgbevf_adapter *adapter = hw->back; + + if (!hw->hw_addr) + return; + hw->hw_addr = NULL; + dev_err(&adapter->pdev->dev, "Adapter removed\n"); + schedule_work(&adapter->watchdog_task); +} + +static void ixgbevf_check_remove(struct ixgbe_hw *hw, u32 reg) +{ + u32 value; + + /* The following check not only optimizes a bit by not + * performing a read on the status register when the + * register just read was a status register read that + * returned IXGBE_FAILED_READ_REG. It also blocks any + * potential recursion. + */ + if (reg == IXGBE_VFSTATUS) { + ixgbevf_remove_adapter(hw); + return; + } + value = ixgbevf_read_reg(hw, IXGBE_VFSTATUS); + if (value == IXGBE_FAILED_READ_REG) + ixgbevf_remove_adapter(hw); +} + +u32 ixgbevf_read_reg(struct ixgbe_hw *hw, u32 reg) +{ + u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr); + u32 value; + + if (IXGBE_REMOVED(reg_addr)) + return IXGBE_FAILED_READ_REG; + value = readl(reg_addr + reg); + if (unlikely(value == IXGBE_FAILED_READ_REG)) + ixgbevf_check_remove(hw, reg); + return value; +} + static inline void ixgbevf_release_rx_desc(struct ixgbevf_ring *rx_ring, u32 val) { @@ -111,7 +154,7 @@ static inline void ixgbevf_release_rx_desc(struct ixgbevf_ring *rx_ring, * such as IA-64). */ wmb(); - writel(val, rx_ring->tail); + ixgbevf_write_tail(rx_ring, val); } /** @@ -516,7 +559,8 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector, /* Workaround hardware that can't do proper VEPA multicast * source pruning. 
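The new ixgbevf_read_reg()/ixgbevf_check_remove() pair above detects surprise removal by the all-ones pattern a dead PCI function returns on reads, cross-checking against a status register because 0xFFFFFFFF can also be a legitimate register value. A simplified sketch of the core idea, assuming the driver's headers (not the driver's exact code, which also kicks the watchdog):

static u32 example_guarded_read(struct ixgbe_hw *hw, u32 reg)
{
	u8 __iomem *base = ACCESS_ONCE(hw->hw_addr);
	u32 value;

	if (!base)			/* already flagged as removed */
		return 0xFFFFFFFF;

	value = readl(base + reg);
	if (unlikely(value == 0xFFFFFFFF))
		hw->hw_addr = NULL;	/* poison so IXGBE_REMOVED() trips */
	return value;
}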
*/ - if ((skb->pkt_type & (PACKET_BROADCAST | PACKET_MULTICAST)) && + if ((skb->pkt_type == PACKET_BROADCAST || + skb->pkt_type == PACKET_MULTICAST) && ether_addr_equal(rx_ring->netdev->dev_addr, eth_hdr(skb)->h_source)) { dev_kfree_skb_irq(skb); @@ -607,7 +651,8 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget) napi_complete(napi); if (adapter->rx_itr_setting & 1) ixgbevf_set_itr(q_vector); - if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) + if (!test_bit(__IXGBEVF_DOWN, &adapter->state) && + !test_bit(__IXGBEVF_REMOVING, &adapter->state)) ixgbevf_irq_enable_queues(adapter, 1 << q_vector->v_idx); @@ -832,7 +877,8 @@ static irqreturn_t ixgbevf_msix_other(int irq, void *data) hw->mac.get_link_status = 1; - if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) + if (!test_bit(__IXGBEVF_DOWN, &adapter->state) && + !test_bit(__IXGBEVF_REMOVING, &adapter->state)) mod_timer(&adapter->watchdog_timer, jiffies); IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other); @@ -1136,7 +1182,7 @@ static void ixgbevf_configure_tx_ring(struct ixgbevf_adapter *adapter, /* reset head and tail pointers */ IXGBE_WRITE_REG(hw, IXGBE_VFTDH(reg_idx), 0); IXGBE_WRITE_REG(hw, IXGBE_VFTDT(reg_idx), 0); - ring->tail = hw->hw_addr + IXGBE_VFTDT(reg_idx); + ring->tail = adapter->io_addr + IXGBE_VFTDT(reg_idx); /* reset ntu and ntc to place SW in sync with hardware */ ring->next_to_clean = 0; @@ -1256,6 +1302,8 @@ static void ixgbevf_disable_rx_queue(struct ixgbevf_adapter *adapter, u32 rxdctl; u8 reg_idx = ring->reg_idx; + if (IXGBE_REMOVED(hw->hw_addr)) + return; rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx)); rxdctl &= ~IXGBE_RXDCTL_ENABLE; @@ -1281,6 +1329,8 @@ static void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter, u32 rxdctl; u8 reg_idx = ring->reg_idx; + if (IXGBE_REMOVED(hw->hw_addr)) + return; do { usleep_range(1000, 2000); rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx)); @@ -1315,7 +1365,7 @@ static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter, /* reset head and tail pointers */ IXGBE_WRITE_REG(hw, IXGBE_VFRDH(reg_idx), 0); IXGBE_WRITE_REG(hw, IXGBE_VFRDT(reg_idx), 0); - ring->tail = hw->hw_addr + IXGBE_VFRDT(reg_idx); + ring->tail = adapter->io_addr + IXGBE_VFRDT(reg_idx); /* reset ntu and ntc to place SW in sync with hardware */ ring->next_to_clean = 0; @@ -1617,6 +1667,7 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter) spin_unlock_bh(&adapter->mbx_lock); + smp_mb__before_clear_bit(); clear_bit(__IXGBEVF_DOWN, &adapter->state); ixgbevf_napi_enable_all(adapter); @@ -1741,7 +1792,8 @@ void ixgbevf_down(struct ixgbevf_adapter *adapter) int i; /* signal that we are down to the interrupt handler */ - set_bit(__IXGBEVF_DOWN, &adapter->state); + if (test_and_set_bit(__IXGBEVF_DOWN, &adapter->state)) + return; /* do nothing if already down */ /* disable all enabled rx queues */ for (i = 0; i < adapter->num_rx_queues; i++) @@ -1817,7 +1869,6 @@ void ixgbevf_reset(struct ixgbevf_adapter *adapter) static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter, int vectors) { - int err = 0; int vector_threshold; /* We'll want at least 2 (vector_threshold): @@ -1831,33 +1882,24 @@ static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter, * Right now, we simply care about how many we'll get; we'll * set them up later while requesting irq's. */ - while (vectors >= vector_threshold) { - err = pci_enable_msix(adapter->pdev, adapter->msix_entries, - vectors); - if (!err || err < 0) /* Success or a nasty failure.
*/ - break; - else /* err == number of vectors we should try again with */ - vectors = err; - } - - if (vectors < vector_threshold) - err = -ENOMEM; + vectors = pci_enable_msix_range(adapter->pdev, adapter->msix_entries, + vector_threshold, vectors); - if (err) { + if (vectors < 0) { dev_err(&adapter->pdev->dev, "Unable to allocate MSI-X interrupts\n"); kfree(adapter->msix_entries); adapter->msix_entries = NULL; - } else { - /* - * Adjust for only the vectors we'll use, which is minimum - * of max_msix_q_vectors + NON_Q_VECTORS, or the number of - * vectors we were allocated. - */ - adapter->num_msix_vectors = vectors; + return vectors; } - return err; + /* Adjust for only the vectors we'll use, which is minimum + * of max_msix_q_vectors + NON_Q_VECTORS, or the number of + * vectors we were allocated. + */ + adapter->num_msix_vectors = vectors; + + return 0; } /** @@ -2338,6 +2380,7 @@ static void ixgbevf_reset_task(struct work_struct *work) /* If we're already down or resetting, just bail */ if (test_bit(__IXGBEVF_DOWN, &adapter->state) || + test_bit(__IXGBEVF_REMOVING, &adapter->state) || test_bit(__IXGBEVF_RESETTING, &adapter->state)) return; @@ -2361,6 +2404,14 @@ static void ixgbevf_watchdog_task(struct work_struct *work) bool link_up = adapter->link_up; s32 need_reset; + if (IXGBE_REMOVED(hw->hw_addr)) { + if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) { + rtnl_lock(); + ixgbevf_down(adapter); + rtnl_unlock(); + } + return; + } ixgbevf_queue_reset_subtask(adapter); adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK; @@ -2422,7 +2473,8 @@ static void ixgbevf_watchdog_task(struct work_struct *work) pf_has_reset: /* Reset the timer */ - if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) + if (!test_bit(__IXGBEVF_DOWN, &adapter->state) && + !test_bit(__IXGBEVF_REMOVING, &adapter->state)) mod_timer(&adapter->watchdog_timer, round_jiffies(jiffies + (2 * HZ))); @@ -2787,6 +2839,9 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring, u32 vlan_macip_lens, type_tucmd; u32 mss_l4len_idx, l4len; + if (skb->ip_summed != CHECKSUM_PARTIAL) + return 0; + if (!skb_is_gso(skb)) return 0; @@ -2857,12 +2912,12 @@ static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring, if (skb->ip_summed == CHECKSUM_PARTIAL) { u8 l4_hdr = 0; switch (skb->protocol) { - case __constant_htons(ETH_P_IP): + case htons(ETH_P_IP): vlan_macip_lens |= skb_network_header_len(skb); type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4; l4_hdr = ip_hdr(skb)->protocol; break; - case __constant_htons(ETH_P_IPV6): + case htons(ETH_P_IPV6): vlan_macip_lens |= skb_network_header_len(skb); l4_hdr = ipv6_hdr(skb)->nexthdr; break; @@ -3060,7 +3115,7 @@ static void ixgbevf_tx_map(struct ixgbevf_ring *tx_ring, tx_ring->next_to_use = i; /* notify HW of packet */ - writel(i, tx_ring->tail); + ixgbevf_write_tail(tx_ring, i); return; dma_error: @@ -3165,7 +3220,7 @@ static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev) tso = ixgbevf_tso(tx_ring, first, &hdr_len); if (tso < 0) goto out_drop; - else + else if (!tso) ixgbevf_tx_csum(tx_ring, first); ixgbevf_tx_map(tx_ring, first, hdr_len); @@ -3274,7 +3329,8 @@ static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state) return retval; #endif - pci_disable_device(pdev); + if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state)) + pci_disable_device(pdev); return 0; } @@ -3286,7 +3342,6 @@ static int ixgbevf_resume(struct pci_dev *pdev) struct ixgbevf_adapter *adapter = netdev_priv(netdev); u32 err; - pci_set_power_state(pdev, PCI_D0); pci_restore_state(pdev); /* * pci_restore_state 
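The hunk above replaces the open-coded retry loop around pci_enable_msix() with pci_enable_msix_range(), which negotiates the vector count internally. A sketch of the call's contract (example_enable_msix() is illustrative):

#include <linux/pci.h>

/* Ask for up to "wanted" vectors but accept as few as "min_vecs";
 * the return value is the count actually granted, somewhere in
 * [min_vecs, wanted], or a negative errno if even min_vecs cannot
 * be met.
 */
static int example_enable_msix(struct pci_dev *pdev,
			       struct msix_entry *entries,
			       int min_vecs, int wanted)
{
	int nvec = pci_enable_msix_range(pdev, entries, min_vecs, wanted);

	if (nvec < 0)
		return nvec;	/* allocation failed outright */

	return nvec;		/* size queues and IRQ requests to nvec */
}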
clears dev->state_saved so call @@ -3299,6 +3354,8 @@ static int ixgbevf_resume(struct pci_dev *pdev) dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n"); return err; } + smp_mb__before_clear_bit(); + clear_bit(__IXGBEVF_DISABLED, &adapter->state); pci_set_master(pdev); ixgbevf_reset(adapter); @@ -3344,10 +3401,10 @@ static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev, for (i = 0; i < adapter->num_rx_queues; i++) { ring = adapter->rx_ring[i]; do { - start = u64_stats_fetch_begin_bh(&ring->syncp); + start = u64_stats_fetch_begin_irq(&ring->syncp); bytes = ring->stats.bytes; packets = ring->stats.packets; - } while (u64_stats_fetch_retry_bh(&ring->syncp, start)); + } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); stats->rx_bytes += bytes; stats->rx_packets += packets; } @@ -3355,10 +3412,10 @@ static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev, for (i = 0; i < adapter->num_tx_queues; i++) { ring = adapter->tx_ring[i]; do { - start = u64_stats_fetch_begin_bh(&ring->syncp); + start = u64_stats_fetch_begin_irq(&ring->syncp); bytes = ring->stats.bytes; packets = ring->stats.packets; - } while (u64_stats_fetch_retry_bh(&ring->syncp, start)); + } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); stats->tx_bytes += bytes; stats->tx_packets += packets; } @@ -3460,6 +3517,7 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) hw->hw_addr = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0)); + adapter->io_addr = hw->hw_addr; if (!hw->hw_addr) { err = -EIO; goto err_ioremap; } @@ -3545,14 +3603,15 @@ err_register: ixgbevf_clear_interrupt_scheme(adapter); err_sw_init: ixgbevf_reset_interrupt_capability(adapter); - iounmap(hw->hw_addr); + iounmap(adapter->io_addr); err_ioremap: free_netdev(netdev); err_alloc_etherdev: pci_release_regions(pdev); err_pci_reg: err_dma: - pci_disable_device(pdev); + if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state)) + pci_disable_device(pdev); return err; } @@ -3570,7 +3629,7 @@ static void ixgbevf_remove(struct pci_dev *pdev) struct net_device *netdev = pci_get_drvdata(pdev); struct ixgbevf_adapter *adapter = netdev_priv(netdev); - set_bit(__IXGBEVF_DOWN, &adapter->state); + set_bit(__IXGBEVF_REMOVING, &adapter->state); del_timer_sync(&adapter->watchdog_timer); @@ -3583,14 +3642,15 @@ static void ixgbevf_remove(struct pci_dev *pdev) ixgbevf_clear_interrupt_scheme(adapter); ixgbevf_reset_interrupt_capability(adapter); - iounmap(adapter->hw.hw_addr); + iounmap(adapter->io_addr); pci_release_regions(pdev); hw_dbg(&adapter->hw, "Remove complete\n"); free_netdev(netdev); - pci_disable_device(pdev); + if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state)) + pci_disable_device(pdev); } /** @@ -3607,15 +3667,20 @@ static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev, struct net_device *netdev = pci_get_drvdata(pdev); struct ixgbevf_adapter *adapter = netdev_priv(netdev); + rtnl_lock(); netif_device_detach(netdev); - if (state == pci_channel_io_perm_failure) + if (state == pci_channel_io_perm_failure) { + rtnl_unlock(); return PCI_ERS_RESULT_DISCONNECT; + } if (netif_running(netdev)) ixgbevf_down(adapter); - pci_disable_device(pdev); + if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state)) + pci_disable_device(pdev); + rtnl_unlock(); /* Request a slot reset.
*/ return PCI_ERS_RESULT_NEED_RESET; @@ -3639,6 +3704,8 @@ static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev) return PCI_ERS_RESULT_DISCONNECT; } + smp_mb__before_clear_bit(); + clear_bit(__IXGBEVF_DISABLED, &adapter->state); pci_set_master(pdev); ixgbevf_reset(adapter); diff --git a/drivers/net/ethernet/intel/ixgbevf/regs.h b/drivers/net/ethernet/intel/ixgbevf/regs.h index debd8c0e1f28..09dd8f698bea 100644 --- a/drivers/net/ethernet/intel/ixgbevf/regs.h +++ b/drivers/net/ethernet/intel/ixgbevf/regs.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel 82599 Virtual Function driver - Copyright(c) 1999 - 2012 Intel Corporation. + Copyright(c) 1999 - 2014 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -70,16 +70,6 @@ #define IXGBE_VFGOTC_MSB 0x02024 #define IXGBE_VFMPRC 0x01034 -#define IXGBE_WRITE_REG(a, reg, value) writel((value), ((a)->hw_addr + (reg))) - -#define IXGBE_READ_REG(a, reg) readl((a)->hw_addr + (reg)) - -#define IXGBE_WRITE_REG_ARRAY(a, reg, offset, value) ( \ - writel((value), ((a)->hw_addr + (reg) + ((offset) << 2)))) - -#define IXGBE_READ_REG_ARRAY(a, reg, offset) ( \ - readl((a)->hw_addr + (reg) + ((offset) << 2))) - #define IXGBE_WRITE_FLUSH(a) (IXGBE_READ_REG(a, IXGBE_VFSTATUS)) #endif /* _IXGBEVF_REGS_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h index 7b1f502d1716..3061d1890471 100644 --- a/drivers/net/ethernet/intel/ixgbevf/vf.h +++ b/drivers/net/ethernet/intel/ixgbevf/vf.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel 82599 Virtual Function driver - Copyright(c) 1999 - 2012 Intel Corporation. + Copyright(c) 1999 - 2014 Intel Corporation. 
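Several hunks in this series wrap pci_disable_device() in test_and_set_bit() on a new DISABLED state bit, and the slot-reset path above clears it again after re-enabling the device. A hedged sketch of why the idiom is race-free, using the ixgbevf names:

/* test_and_set_bit() returns the previous value atomically, so of all
 * teardown paths that can race here (remove, suspend, error handler),
 * exactly one observes a clear bit and performs the disable.
 */
static void example_disable_once(struct ixgbevf_adapter *adapter)
{
	if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state))
		pci_disable_device(adapter->pdev);
}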
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -172,6 +172,37 @@ struct ixgbevf_info { const struct ixgbe_mac_operations *mac_ops; }; +#define IXGBE_FAILED_READ_REG 0xffffffffU + +#define IXGBE_REMOVED(a) unlikely(!(a)) + +static inline void ixgbe_write_reg(struct ixgbe_hw *hw, u32 reg, u32 value) +{ + u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr); + + if (IXGBE_REMOVED(reg_addr)) + return; + writel(value, reg_addr + reg); +} +#define IXGBE_WRITE_REG(h, r, v) ixgbe_write_reg(h, r, v) + +u32 ixgbevf_read_reg(struct ixgbe_hw *hw, u32 reg); +#define IXGBE_READ_REG(h, r) ixgbevf_read_reg(h, r) + +static inline void ixgbe_write_reg_array(struct ixgbe_hw *hw, u32 reg, + u32 offset, u32 value) +{ + ixgbe_write_reg(hw, reg + (offset << 2), value); +} +#define IXGBE_WRITE_REG_ARRAY(h, r, o, v) ixgbe_write_reg_array(h, r, o, v) + +static inline u32 ixgbe_read_reg_array(struct ixgbe_hw *hw, u32 reg, + u32 offset) +{ + return ixgbevf_read_reg(hw, reg + (offset << 2)); +} +#define IXGBE_READ_REG_ARRAY(h, r, o) ixgbe_read_reg_array(h, r, o) + void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size); int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api); int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs, diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c index f5685c0d0579..b0c6050479eb 100644 --- a/drivers/net/ethernet/jme.c +++ b/drivers/net/ethernet/jme.c @@ -2054,19 +2054,6 @@ jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx) } static int -jme_expand_header(struct jme_adapter *jme, struct sk_buff *skb) -{ - if (unlikely(skb_shinfo(skb)->gso_size && - skb_header_cloned(skb) && - pskb_expand_head(skb, 0, 0, GFP_ATOMIC))) { - dev_kfree_skb(skb); - return -1; - } - - return 0; -} - -static int jme_tx_tso(struct sk_buff *skb, __le16 *mss, u8 *flags) { *mss = cpu_to_le16(skb_shinfo(skb)->gso_size << TXDESC_MSS_SHIFT); @@ -2225,7 +2212,8 @@ jme_start_xmit(struct sk_buff *skb, struct net_device *netdev) struct jme_adapter *jme = netdev_priv(netdev); int idx; - if (unlikely(jme_expand_header(jme, skb))) { + if (unlikely(skb_is_gso(skb) && skb_cow_head(skb, 0))) { + dev_kfree_skb_any(skb); ++(NET_STAT(jme).tx_dropped); return NETDEV_TX_OK; } diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index a2565ce22b7c..b7b8d74c22d9 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c @@ -730,7 +730,7 @@ static int txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb) unlikely(tag_bytes & ~12)) { if (skb_checksum_help(skb) == 0) goto no_csum; - kfree_skb(skb); + dev_kfree_skb_any(skb); return 1; } @@ -819,7 +819,7 @@ static netdev_tx_t mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev) if (txq->tx_ring_size - txq->tx_desc_count < MAX_SKB_FRAGS + 1) { if (net_ratelimit()) netdev_err(dev, "tx queue full?!\n"); - kfree_skb(skb); + dev_kfree_skb_any(skb); return NETDEV_TX_OK; } diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c index fd409d76b811..b161a525fc5b 100644 --- a/drivers/net/ethernet/marvell/mvmdio.c +++ b/drivers/net/ethernet/marvell/mvmdio.c @@ -167,11 +167,6 @@ out: return ret; } -static int orion_mdio_reset(struct mii_bus *bus) -{ - return 0; -} - static irqreturn_t orion_mdio_err_irq(int irq, void *dev_id) { struct orion_mdio_dev *dev = dev_id; @@ -209,7 +204,6 @@ static int 
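Replacing the raw readl()/writel() macros from regs.h with the inline helpers above gains argument type checking and a single choke point for the removal guard, while the IXGBE_READ_REG()/IXGBE_WRITE_REG() defines keep every call site source-compatible. An illustrative caller, assuming the driver's headers:

static void example_clear_tail(struct ixgbe_hw *hw, u8 reg_idx)
{
	/* Expands to ixgbe_write_reg(): silently a no-op once the
	 * adapter has been flagged removed, instead of faulting
	 * through a stale mapping.
	 */
	IXGBE_WRITE_REG(hw, IXGBE_VFTDT(reg_idx), 0);
}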
orion_mdio_probe(struct platform_device *pdev) bus->name = "orion_mdio_bus"; bus->read = orion_mdio_read; bus->write = orion_mdio_write; - bus->reset = orion_mdio_reset; snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii", dev_name(&pdev->dev)); bus->parent = &pdev->dev; diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index f418f4f20f94..d04b1c3c9b85 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -22,6 +22,7 @@ #include <linux/interrupt.h> #include <net/ip.h> #include <net/ipv6.h> +#include <linux/io.h> #include <linux/of.h> #include <linux/of_irq.h> #include <linux/of_mdio.h> @@ -88,8 +89,9 @@ #define MVNETA_TX_IN_PRGRS BIT(1) #define MVNETA_TX_FIFO_EMPTY BIT(8) #define MVNETA_RX_MIN_FRAME_SIZE 0x247c -#define MVNETA_SGMII_SERDES_CFG 0x24A0 +#define MVNETA_SERDES_CFG 0x24A0 #define MVNETA_SGMII_SERDES_PROTO 0x0cc7 +#define MVNETA_RGMII_SERDES_PROTO 0x0667 #define MVNETA_TYPE_PRIO 0x24bc #define MVNETA_FORCE_UNI BIT(21) #define MVNETA_TXQ_CMD_1 0x24e4 @@ -161,7 +163,7 @@ #define MVNETA_GMAC_MAX_RX_SIZE_MASK 0x7ffc #define MVNETA_GMAC0_PORT_ENABLE BIT(0) #define MVNETA_GMAC_CTRL_2 0x2c08 -#define MVNETA_GMAC2_PSC_ENABLE BIT(3) +#define MVNETA_GMAC2_PCS_ENABLE BIT(3) #define MVNETA_GMAC2_PORT_RGMII BIT(4) #define MVNETA_GMAC2_PORT_RESET BIT(6) #define MVNETA_GMAC_STATUS 0x2c10 @@ -508,12 +510,12 @@ struct rtnl_link_stats64 *mvneta_get_stats64(struct net_device *dev, cpu_stats = per_cpu_ptr(pp->stats, cpu); do { - start = u64_stats_fetch_begin_bh(&cpu_stats->syncp); + start = u64_stats_fetch_begin_irq(&cpu_stats->syncp); rx_packets = cpu_stats->rx_packets; rx_bytes = cpu_stats->rx_bytes; tx_packets = cpu_stats->tx_packets; tx_bytes = cpu_stats->tx_bytes; - } while (u64_stats_fetch_retry_bh(&cpu_stats->syncp, start)); + } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start)); stats->rx_packets += rx_packets; stats->rx_bytes += rx_bytes; @@ -710,35 +712,6 @@ static void mvneta_rxq_bm_disable(struct mvneta_port *pp, mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val); } - - -/* Sets the RGMII Enable bit (RGMIIEn) in port MAC control register */ -static void mvneta_gmac_rgmii_set(struct mvneta_port *pp, int enable) -{ - u32 val; - - val = mvreg_read(pp, MVNETA_GMAC_CTRL_2); - - if (enable) - val |= MVNETA_GMAC2_PORT_RGMII; - else - val &= ~MVNETA_GMAC2_PORT_RGMII; - - mvreg_write(pp, MVNETA_GMAC_CTRL_2, val); -} - -/* Config SGMII port */ -static void mvneta_port_sgmii_config(struct mvneta_port *pp) -{ - u32 val; - - val = mvreg_read(pp, MVNETA_GMAC_CTRL_2); - val |= MVNETA_GMAC2_PSC_ENABLE; - mvreg_write(pp, MVNETA_GMAC_CTRL_2, val); - - mvreg_write(pp, MVNETA_SGMII_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO); -} - /* Start the Ethernet port RX and TX activity */ static void mvneta_port_up(struct mvneta_port *pp) { @@ -2756,12 +2729,15 @@ static void mvneta_port_power_up(struct mvneta_port *pp, int phy_mode) mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0); if (phy_mode == PHY_INTERFACE_MODE_SGMII) - mvneta_port_sgmii_config(pp); + mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO); + else + mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_RGMII_SERDES_PROTO); + + val = mvreg_read(pp, MVNETA_GMAC_CTRL_2); - mvneta_gmac_rgmii_set(pp, 1); + val |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII; /* Cancel Port Reset */ - val = mvreg_read(pp, MVNETA_GMAC_CTRL_2); val &= ~MVNETA_GMAC2_PORT_RESET; mvreg_write(pp, MVNETA_GMAC_CTRL_2, val); @@ -2774,6 +2750,7 @@ static void mvneta_port_power_up(struct mvneta_port 
*pp, int phy_mode) static int mvneta_probe(struct platform_device *pdev) { const struct mbus_dram_target_info *dram_target_info; + struct resource *res; struct device_node *dn = pdev->dev.of_node; struct device_node *phy_node; u32 phy_addr; @@ -2784,7 +2761,6 @@ static int mvneta_probe(struct platform_device *pdev) const char *mac_from; int phy_mode; int err; - int cpu; /* Our multiqueue support is not complete, so for now, only * allow the usage of the first RX queue @@ -2838,23 +2814,18 @@ static int mvneta_probe(struct platform_device *pdev) clk_prepare_enable(pp->clk); - pp->base = of_iomap(dn, 0); - if (pp->base == NULL) { - err = -ENOMEM; + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + pp->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(pp->base)) { + err = PTR_ERR(pp->base); goto err_clk; } /* Alloc per-cpu stats */ - pp->stats = alloc_percpu(struct mvneta_pcpu_stats); + pp->stats = netdev_alloc_pcpu_stats(struct mvneta_pcpu_stats); if (!pp->stats) { err = -ENOMEM; - goto err_unmap; - } - - for_each_possible_cpu(cpu) { - struct mvneta_pcpu_stats *stats; - stats = per_cpu_ptr(pp->stats, cpu); - u64_stats_init(&stats->syncp); + goto err_clk; } dt_mac_addr = of_get_mac_address(dn); @@ -2913,8 +2884,6 @@ err_deinit: mvneta_deinit(pp); err_free_stats: free_percpu(pp->stats); -err_unmap: - iounmap(pp->base); err_clk: clk_disable_unprepare(pp->clk); err_free_irq: @@ -2934,7 +2903,6 @@ static int mvneta_remove(struct platform_device *pdev) mvneta_deinit(pp); clk_disable_unprepare(pp->clk); free_percpu(pp->stats); - iounmap(pp->base); irq_dispose_mapping(dev->irq); free_netdev(dev); diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c index 597846193869..7f81ae66cc89 100644 --- a/drivers/net/ethernet/marvell/skge.c +++ b/drivers/net/ethernet/marvell/skge.c @@ -2845,7 +2845,7 @@ mapping_unwind: mapping_error: if (net_ratelimit()) dev_warn(&hw->pdev->dev, "%s: tx mapping error\n", dev->name); - dev_kfree_skb(skb); + dev_kfree_skb_any(skb); return NETDEV_TX_OK; } @@ -3172,7 +3172,7 @@ static void skge_tx_done(struct net_device *dev) pkts_compl++; bytes_compl += e->skb->len; - dev_kfree_skb(e->skb); + dev_consume_skb_any(e->skb); } } netdev_completed_queue(dev, pkts_compl, bytes_compl); diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index 55a37ae11440..b81106451a0a 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c @@ -44,6 +44,8 @@ #include <linux/prefetch.h> #include <linux/debugfs.h> #include <linux/mii.h> +#include <linux/of_device.h> +#include <linux/of_net.h> #include <asm/irq.h> @@ -2000,7 +2002,7 @@ mapping_unwind: mapping_error: if (net_ratelimit()) dev_warn(&hw->pdev->dev, "%s: tx mapping error\n", dev->name); - dev_kfree_skb(skb); + dev_kfree_skb_any(skb); return NETDEV_TX_OK; } @@ -2733,6 +2735,9 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx) unsigned int total_bytes[2] = { 0 }; unsigned int total_packets[2] = { 0 }; + if (to_do <= 0) + return work_done; + rmb(); do { struct sky2_port *sky2; @@ -3906,19 +3911,19 @@ static struct rtnl_link_stats64 *sky2_get_stats(struct net_device *dev, u64 _bytes, _packets; do { - start = u64_stats_fetch_begin_bh(&sky2->rx_stats.syncp); + start = u64_stats_fetch_begin_irq(&sky2->rx_stats.syncp); _bytes = sky2->rx_stats.bytes; _packets = sky2->rx_stats.packets; - } while (u64_stats_fetch_retry_bh(&sky2->rx_stats.syncp, start)); + } while (u64_stats_fetch_retry_irq(&sky2->rx_stats.syncp, 
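mvneta's probe above moves from of_iomap() to the managed devm_ioremap_resource(), which is why the err_unmap label and both iounmap() calls disappear. The canonical shape of the pattern:

#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/err.h>

static int example_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);	/* also handles res == NULL */

	/* ... no iounmap() needed in error paths or in remove() ... */
	return 0;
}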
start)); stats->rx_packets = _packets; stats->rx_bytes = _bytes; do { - start = u64_stats_fetch_begin_bh(&sky2->tx_stats.syncp); + start = u64_stats_fetch_begin_irq(&sky2->tx_stats.syncp); _bytes = sky2->tx_stats.bytes; _packets = sky2->tx_stats.packets; - } while (u64_stats_fetch_retry_bh(&sky2->tx_stats.syncp, start)); + } while (u64_stats_fetch_retry_irq(&sky2->tx_stats.syncp, start)); stats->tx_packets = _packets; stats->tx_bytes = _bytes; @@ -4748,6 +4753,7 @@ static struct net_device *sky2_init_netdev(struct sky2_hw *hw, unsigned port, { struct sky2_port *sky2; struct net_device *dev = alloc_etherdev(sizeof(*sky2)); + const void *iap; if (!dev) return NULL; @@ -4805,8 +4811,16 @@ static struct net_device *sky2_init_netdev(struct sky2_hw *hw, unsigned port, dev->features |= dev->hw_features; - /* read the mac address */ - memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port * 8, ETH_ALEN); + /* try to get mac address in the following order: + * 1) from device tree data + * 2) from internal registers set by bootloader + */ + iap = of_get_mac_address(hw->pdev->dev.of_node); + if (iap) + memcpy(dev->dev_addr, iap, ETH_ALEN); + else + memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port * 8, + ETH_ALEN); return dev; } diff --git a/drivers/net/ethernet/mellanox/mlx4/Kconfig b/drivers/net/ethernet/mellanox/mlx4/Kconfig index 563495d8975a..1486ce902a56 100644 --- a/drivers/net/ethernet/mellanox/mlx4/Kconfig +++ b/drivers/net/ethernet/mellanox/mlx4/Kconfig @@ -3,7 +3,7 @@ # config MLX4_EN - tristate "Mellanox Technologies 10Gbit Ethernet support" + tristate "Mellanox Technologies 1/10/40Gbit Ethernet support" depends on PCI select MLX4_CORE select PTP_1588_CLOCK @@ -23,6 +23,13 @@ config MLX4_EN_DCB If unsure, set to Y +config MLX4_EN_VXLAN + bool "VXLAN offloads Support" + default y + depends on MLX4_EN && VXLAN && !(MLX4_EN=y && VXLAN=m) + ---help--- + Say Y here if you want to use VXLAN offloads in the driver. 
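The !(MLX4_EN=y && VXLAN=m) clause in the new Kconfig entry is the usual guard against a built-in driver referencing symbols in a modular vxlan. With the bool set, the driver compiles its VXLAN paths conditionally; a minimal sketch of the consuming side (the function name is illustrative):

#include <net/vxlan.h>

static void example_query_vxlan_ports(struct net_device *dev)
{
#ifdef CONFIG_MLX4_EN_VXLAN
	/* Only compiled in when the Kconfig guard guarantees the vxlan
	 * symbols are reachable (both built-in, or both modular); asks
	 * vxlan to replay its known UDP ports to the driver's
	 * ndo_add_vxlan_port callback.
	 */
	vxlan_get_rx_port(dev);
#endif
}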
+ config MLX4_CORE tristate depends on PCI diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c index 0d02fba94536..78099eab7673 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c @@ -800,16 +800,7 @@ static int mlx4_MAD_IFC_wrapper(struct mlx4_dev *dev, int slave, vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE); } -static int MLX4_CMD_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave, - struct mlx4_vhcr *vhcr, - struct mlx4_cmd_mailbox *inbox, - struct mlx4_cmd_mailbox *outbox, - struct mlx4_cmd_info *cmd) -{ - return -EPERM; -} - -static int MLX4_CMD_GET_OP_REQ_wrapper(struct mlx4_dev *dev, int slave, +static int mlx4_CMD_EPERM_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr, struct mlx4_cmd_mailbox *inbox, struct mlx4_cmd_mailbox *outbox, @@ -964,6 +955,15 @@ static struct mlx4_cmd_info cmd_info[] = { .wrapper = NULL }, { + .opcode = MLX4_CMD_CONFIG_DEV, + .has_inbox = false, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_CMD_EPERM_wrapper + }, + { .opcode = MLX4_CMD_ALLOC_RES, .has_inbox = false, .has_outbox = false, @@ -1258,7 +1258,7 @@ static struct mlx4_cmd_info cmd_info[] = { .out_is_imm = false, .encode_slave_id = false, .verify = NULL, - .wrapper = MLX4_CMD_UPDATE_QP_wrapper + .wrapper = mlx4_CMD_EPERM_wrapper }, { .opcode = MLX4_CMD_GET_OP_REQ, @@ -1267,7 +1267,7 @@ static struct mlx4_cmd_info cmd_info[] = { .out_is_imm = false, .encode_slave_id = false, .verify = NULL, - .wrapper = MLX4_CMD_GET_OP_REQ_wrapper, + .wrapper = mlx4_CMD_EPERM_wrapper, }, { .opcode = MLX4_CMD_CONF_SPECIAL_QP, @@ -1378,7 +1378,7 @@ static struct mlx4_cmd_info cmd_info[] = { .out_is_imm = false, .encode_slave_id = false, .verify = NULL, - .wrapper = mlx4_FLOW_STEERING_IB_UC_QP_RANGE_wrapper + .wrapper = mlx4_CMD_EPERM_wrapper }, }; @@ -1643,8 +1643,16 @@ static int mlx4_master_activate_admin_state(struct mlx4_priv *priv, int slave) int port, err; struct mlx4_vport_state *vp_admin; struct mlx4_vport_oper_state *vp_oper; - - for (port = 1; port <= MLX4_MAX_PORTS; port++) { + struct mlx4_active_ports actv_ports = mlx4_get_active_ports( + &priv->dev, slave); + int min_port = find_first_bit(actv_ports.ports, + priv->dev.caps.num_ports) + 1; + int max_port = min_port - 1 + + bitmap_weight(actv_ports.ports, priv->dev.caps.num_ports); + + for (port = min_port; port <= max_port; port++) { + if (!test_bit(port - 1, actv_ports.ports)) + continue; vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port]; vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port]; vp_oper->state = *vp_admin; @@ -1685,8 +1693,17 @@ static void mlx4_master_deactivate_admin_state(struct mlx4_priv *priv, int slave { int port; struct mlx4_vport_oper_state *vp_oper; + struct mlx4_active_ports actv_ports = mlx4_get_active_ports( + &priv->dev, slave); + int min_port = find_first_bit(actv_ports.ports, + priv->dev.caps.num_ports) + 1; + int max_port = min_port - 1 + + bitmap_weight(actv_ports.ports, priv->dev.caps.num_ports); + - for (port = 1; port <= MLX4_MAX_PORTS; port++) { + for (port = min_port; port <= max_port; port++) { + if (!test_bit(port - 1, actv_ports.ports)) + continue; vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port]; if (NO_INDX != vp_oper->vlan_idx) { __mlx4_unregister_vlan(&priv->dev, @@ -2234,6 +2251,112 @@ static int mlx4_get_slave_indx(struct mlx4_dev *dev, int vf) return vf+1; } +int mlx4_get_vf_indx(struct mlx4_dev *dev, int slave) +{ + 
if (slave < 1 || slave > dev->num_vfs) { + mlx4_err(dev, + "Bad slave number:%d (number of activated slaves: %lu)\n", + slave, dev->num_slaves); + return -EINVAL; + } + return slave - 1; +} + +struct mlx4_active_ports mlx4_get_active_ports(struct mlx4_dev *dev, int slave) +{ + struct mlx4_active_ports actv_ports; + int vf; + + bitmap_zero(actv_ports.ports, MLX4_MAX_PORTS); + + if (slave == 0) { + bitmap_fill(actv_ports.ports, dev->caps.num_ports); + return actv_ports; + } + + vf = mlx4_get_vf_indx(dev, slave); + if (vf < 0) + return actv_ports; + + bitmap_set(actv_ports.ports, dev->dev_vfs[vf].min_port - 1, + min((int)dev->dev_vfs[mlx4_get_vf_indx(dev, slave)].n_ports, + dev->caps.num_ports)); + + return actv_ports; +} +EXPORT_SYMBOL_GPL(mlx4_get_active_ports); + +int mlx4_slave_convert_port(struct mlx4_dev *dev, int slave, int port) +{ + unsigned n; + struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave); + unsigned m = bitmap_weight(actv_ports.ports, dev->caps.num_ports); + + if (port <= 0 || port > m) + return -EINVAL; + + n = find_first_bit(actv_ports.ports, dev->caps.num_ports); + if (port <= n) + port = n + 1; + + return port; +} +EXPORT_SYMBOL_GPL(mlx4_slave_convert_port); + +int mlx4_phys_to_slave_port(struct mlx4_dev *dev, int slave, int port) +{ + struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave); + if (test_bit(port - 1, actv_ports.ports)) + return port - + find_first_bit(actv_ports.ports, dev->caps.num_ports); + + return -1; +} +EXPORT_SYMBOL_GPL(mlx4_phys_to_slave_port); + +struct mlx4_slaves_pport mlx4_phys_to_slaves_pport(struct mlx4_dev *dev, + int port) +{ + unsigned i; + struct mlx4_slaves_pport slaves_pport; + + bitmap_zero(slaves_pport.slaves, MLX4_MFUNC_MAX); + + if (port <= 0 || port > dev->caps.num_ports) + return slaves_pport; + + for (i = 0; i < dev->num_vfs + 1; i++) { + struct mlx4_active_ports actv_ports = + mlx4_get_active_ports(dev, i); + if (test_bit(port - 1, actv_ports.ports)) + set_bit(i, slaves_pport.slaves); + } + + return slaves_pport; +} +EXPORT_SYMBOL_GPL(mlx4_phys_to_slaves_pport); + +struct mlx4_slaves_pport mlx4_phys_to_slaves_pport_actv( + struct mlx4_dev *dev, + const struct mlx4_active_ports *crit_ports) +{ + unsigned i; + struct mlx4_slaves_pport slaves_pport; + + bitmap_zero(slaves_pport.slaves, MLX4_MFUNC_MAX); + + for (i = 0; i < dev->num_vfs + 1; i++) { + struct mlx4_active_ports actv_ports = + mlx4_get_active_ports(dev, i); + if (bitmap_equal(crit_ports->ports, actv_ports.ports, + dev->caps.num_ports)) + set_bit(i, slaves_pport.slaves); + } + + return slaves_pport; +} +EXPORT_SYMBOL_GPL(mlx4_phys_to_slaves_pport_actv); + int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac) { struct mlx4_priv *priv = mlx4_priv(dev); @@ -2289,6 +2412,30 @@ int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos) } EXPORT_SYMBOL_GPL(mlx4_set_vf_vlan); + /* mlx4_get_slave_default_vlan - + * return true if VST ( default vlan) + * if VST, will return vlan & qos (if not NULL) + */ +bool mlx4_get_slave_default_vlan(struct mlx4_dev *dev, int port, int slave, + u16 *vlan, u8 *qos) +{ + struct mlx4_vport_oper_state *vp_oper; + struct mlx4_priv *priv; + + priv = mlx4_priv(dev); + vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port]; + + if (MLX4_VGT != vp_oper->state.default_vlan) { + if (vlan) + *vlan = vp_oper->state.default_vlan; + if (qos) + *qos = vp_oper->state.default_qos; + return true; + } + return false; +} +EXPORT_SYMBOL_GPL(mlx4_get_slave_default_vlan); + int 
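The helpers above describe each slave's port set as a bitmap so that single-port and dual-port VFs can coexist on one device. A hedged sketch of consuming the bitmap; the loop body is illustrative only:

static void example_walk_slave_ports(struct mlx4_dev *dev, int slave)
{
	struct mlx4_active_ports actv = mlx4_get_active_ports(dev, slave);
	int bit;

	/* bits are 0-based, mlx4 port numbers are 1-based */
	for_each_set_bit(bit, actv.ports, dev->caps.num_ports)
		mlx4_warn(dev, "slave %d may use port %d\n",
			  slave, bit + 1);
}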
mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting) { struct mlx4_priv *priv = mlx4_priv(dev); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_clock.c b/drivers/net/ethernet/mellanox/mlx4/en_clock.c index abaf6bb22416..57dda95b67d8 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_clock.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_clock.c @@ -276,6 +276,7 @@ static const struct ptp_clock_info mlx4_en_ptp_clock_info = { .n_alarm = 0, .n_ext_ts = 0, .n_per_out = 0, + .n_pins = 0, .pps = 0, .adjfreq = mlx4_en_phc_adjfreq, .adjtime = mlx4_en_phc_adjtime, diff --git a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c index b4881b686159..c95ca252187c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c @@ -62,7 +62,7 @@ static int mlx4_en_ets_validate(struct mlx4_en_priv *priv, struct ieee_ets *ets) int has_ets_tc = 0; for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { - if (ets->prio_tc[i] > MLX4_EN_NUM_UP) { + if (ets->prio_tc[i] >= MLX4_EN_NUM_UP) { en_err(priv, "Bad priority in UP <=> TC mapping. TC: %d, UP: %d\n", i, ets->prio_tc[i]); return -EINVAL; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c index d357bf5a4686..0c59d4fe7e3a 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c @@ -72,6 +72,12 @@ MLX4_EN_PARM_INT(pfctx, 0, "Priority based Flow Control policy on TX[7:0]." MLX4_EN_PARM_INT(pfcrx, 0, "Priority based Flow Control policy on RX[7:0]." " Per priority bit mask"); +MLX4_EN_PARM_INT(inline_thold, MAX_INLINE, + "Threshold for using inline data (range: 17-104, default: 104)"); + +#define MAX_PFC_TX 0xff +#define MAX_PFC_RX 0xff + int en_print(const char *level, const struct mlx4_en_priv *priv, const char *format, ...) 
{ @@ -140,6 +146,7 @@ static int mlx4_en_get_profile(struct mlx4_en_dev *mdev) params->prof[i].tx_ring_num = params->num_tx_rings_p_up * MLX4_EN_NUM_UP; params->prof[i].rss_rings = 0; + params->prof[i].inline_thold = inline_thold; } return 0; @@ -274,19 +281,8 @@ static void *mlx4_en_add(struct mlx4_dev *dev) if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) mlx4_en_init_timestamp(mdev); - mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) { - if (!dev->caps.comp_pool) { - mdev->profile.prof[i].rx_ring_num = - rounddown_pow_of_two(max_t(int, MIN_RX_RINGS, - min_t(int, - dev->caps.num_comp_vectors, - DEF_RX_RINGS))); - } else { - mdev->profile.prof[i].rx_ring_num = rounddown_pow_of_two( - min_t(int, dev->caps.comp_pool/ - dev->caps.num_ports - 1 , MAX_MSIX_P_PORT - 1)); - } - } + /* Set default number of RX rings*/ + mlx4_en_set_num_rx_rings(mdev); /* Create our own workqueue for reset/multicast tasks * Note: we cannot use the shared workqueue because of deadlocks caused @@ -336,8 +332,31 @@ static struct mlx4_interface mlx4_en_interface = { .protocol = MLX4_PROT_ETH, }; +static void mlx4_en_verify_params(void) +{ + if (pfctx > MAX_PFC_TX) { + pr_warn("mlx4_en: WARNING: illegal module parameter pfctx 0x%x - should be in range 0-0x%x, will be changed to default (0)\n", + pfctx, MAX_PFC_TX); + pfctx = 0; + } + + if (pfcrx > MAX_PFC_RX) { + pr_warn("mlx4_en: WARNING: illegal module parameter pfcrx 0x%x - should be in range 0-0x%x, will be changed to default (0)\n", + pfcrx, MAX_PFC_RX); + pfcrx = 0; + } + + if (inline_thold < MIN_PKT_LEN || inline_thold > MAX_INLINE) { + pr_warn("mlx4_en: WARNING: illegal module parameter inline_thold %d - should be in range %d-%d, will be changed to default (%d)\n", + inline_thold, MIN_PKT_LEN, MAX_INLINE, MAX_INLINE); + inline_thold = MAX_INLINE; + } +} + static int __init mlx4_en_init(void) { + mlx4_en_verify_params(); + return mlx4_register_interface(&mlx4_en_interface); } diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index fad45316200a..f085c2df5e69 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -39,6 +39,7 @@ #include <linux/hash.h> #include <net/ip.h> #include <net/busy_poll.h> +#include <net/vxlan.h> #include <linux/mlx4/driver.h> #include <linux/mlx4/device.h> @@ -603,7 +604,7 @@ static int mlx4_en_get_qp(struct mlx4_en_priv *priv) int err = 0; u64 reg_id; int *qpn = &priv->base_qpn; - u64 mac = mlx4_en_mac_to_u64(priv->dev->dev_addr); + u64 mac = mlx4_mac_to_u64(priv->dev->dev_addr); en_dbg(DRV, priv, "Registering MAC: %pM for adding\n", priv->dev->dev_addr); @@ -672,7 +673,7 @@ static void mlx4_en_put_qp(struct mlx4_en_priv *priv) u64 mac; if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) { - mac = mlx4_en_mac_to_u64(priv->dev->dev_addr); + mac = mlx4_mac_to_u64(priv->dev->dev_addr); en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n", priv->dev->dev_addr); mlx4_unregister_mac(dev, priv->port, mac); @@ -685,7 +686,7 @@ static void mlx4_en_put_qp(struct mlx4_en_priv *priv) for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) { bucket = &priv->mac_hash[i]; hlist_for_each_entry_safe(entry, tmp, bucket, hlist) { - mac = mlx4_en_mac_to_u64(entry->mac); + mac = mlx4_mac_to_u64(entry->mac); en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n", entry->mac); mlx4_en_uc_steer_release(priv, entry->mac, @@ -715,14 +716,14 @@ static int mlx4_en_replace_mac(struct mlx4_en_priv *priv, int qpn, struct mlx4_en_dev *mdev = priv->mdev; 
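/* Note: the en_netdev.c hunks around here replace the driver-local
 * mlx4_en_mac_to_u64() with the shared mlx4_mac_to_u64() helper (the old
 * body is deleted a few hunks further down). Both pack the six bytes of
 * an Ethernet address into the low 48 bits of a u64, most significant
 * byte first. A minimal standalone sketch of that packing, under an
 * illustrative name (not the kernel symbol):
 *
 *	#include <stdint.h>
 *
 *	static uint64_t example_mac_to_u64(const uint8_t addr[6])
 *	{
 *		uint64_t mac = 0;
 *		int i;
 *
 *		for (i = 0; i < 6; i++) {	// 6 == ETH_ALEN
 *			mac <<= 8;
 *			mac |= addr[i];
 *		}
 *		return mac;	// 00:11:22:33:44:55 -> 0x001122334455
 *	}
 */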
struct mlx4_dev *dev = mdev->dev; int err = 0; - u64 new_mac_u64 = mlx4_en_mac_to_u64(new_mac); + u64 new_mac_u64 = mlx4_mac_to_u64(new_mac); if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) { struct hlist_head *bucket; unsigned int mac_hash; struct mlx4_mac_entry *entry; struct hlist_node *tmp; - u64 prev_mac_u64 = mlx4_en_mac_to_u64(prev_mac); + u64 prev_mac_u64 = mlx4_mac_to_u64(prev_mac); bucket = &priv->mac_hash[prev_mac[MLX4_EN_MAC_HASH_IDX]]; hlist_for_each_entry_safe(entry, tmp, bucket, hlist) { @@ -742,6 +743,14 @@ static int mlx4_en_replace_mac(struct mlx4_en_priv *priv, int qpn, err = mlx4_en_uc_steer_add(priv, new_mac, &qpn, &entry->reg_id); + if (err) + return err; + if (priv->tunnel_reg_id) { + mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id); + priv->tunnel_reg_id = 0; + } + err = mlx4_en_tunnel_steer_add(priv, new_mac, qpn, + &priv->tunnel_reg_id); return err; } } @@ -751,18 +760,6 @@ static int mlx4_en_replace_mac(struct mlx4_en_priv *priv, int qpn, return __mlx4_replace_mac(dev, priv->port, qpn, new_mac_u64); } -u64 mlx4_en_mac_to_u64(u8 *addr) -{ - u64 mac = 0; - int i; - - for (i = 0; i < ETH_ALEN; i++) { - mac <<= 8; - mac |= addr[i]; - } - return mac; -} - static int mlx4_en_do_set_mac(struct mlx4_en_priv *priv) { int err = 0; @@ -1081,7 +1078,7 @@ static void mlx4_en_do_multicast(struct mlx4_en_priv *priv, mlx4_en_cache_mclist(dev); netif_addr_unlock_bh(dev); list_for_each_entry(mclist, &priv->mc_list, list) { - mcast_addr = mlx4_en_mac_to_u64(mclist->addr); + mcast_addr = mlx4_mac_to_u64(mclist->addr); mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, mcast_addr, 0, MLX4_MCAST_CONFIG); } @@ -1173,7 +1170,7 @@ static void mlx4_en_do_uc_filter(struct mlx4_en_priv *priv, found = true; if (!found) { - mac = mlx4_en_mac_to_u64(entry->mac); + mac = mlx4_mac_to_u64(entry->mac); mlx4_en_uc_steer_release(priv, entry->mac, priv->base_qpn, entry->reg_id); @@ -1216,7 +1213,7 @@ static void mlx4_en_do_uc_filter(struct mlx4_en_priv *priv, priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC; break; } - mac = mlx4_en_mac_to_u64(ha->addr); + mac = mlx4_mac_to_u64(ha->addr); memcpy(entry->mac, ha->addr, ETH_ALEN); err = mlx4_register_mac(mdev->dev, priv->port, mac); if (err < 0) { @@ -1669,7 +1666,7 @@ int mlx4_en_start_port(struct net_device *dev) } if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) { - err = mlx4_SET_PORT_VXLAN(mdev->dev, priv->port, VXLAN_STEER_BY_OUTER_MAC); + err = mlx4_SET_PORT_VXLAN(mdev->dev, priv->port, VXLAN_STEER_BY_OUTER_MAC, 1); if (err) { en_err(priv, "Failed setting port L2 tunnel configuration, err %d\n", err); @@ -1701,6 +1698,10 @@ int mlx4_en_start_port(struct net_device *dev) mlx4_set_stats_bitmap(mdev->dev, &priv->stats_bitmap); +#ifdef CONFIG_MLX4_EN_VXLAN + if (priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS) + vxlan_get_rx_port(dev); +#endif priv->port_up = true; netif_tx_start_all_queues(dev); netif_device_attach(dev); @@ -1792,6 +1793,8 @@ void mlx4_en_stop_port(struct net_device *dev, int detach) mc_list[5] = priv->port; mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, mc_list, MLX4_PROT_ETH, mclist->reg_id); + if (mclist->tunnel_reg_id) + mlx4_flow_detach(mdev->dev, mclist->tunnel_reg_id); } mlx4_en_clear_list(dev); list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) { @@ -2206,7 +2209,7 @@ static int mlx4_en_set_vf_mac(struct net_device *dev, int queue, u8 *mac) { struct mlx4_en_priv *en_priv = netdev_priv(dev); struct mlx4_en_dev *mdev = en_priv->mdev; - u64 mac_u64 = 
mlx4_en_mac_to_u64(mac); + u64 mac_u64 = mlx4_mac_to_u64(mac); if (!is_valid_ether_addr(mac)) return -EINVAL; @@ -2266,6 +2269,83 @@ static int mlx4_en_get_phys_port_id(struct net_device *dev, return 0; } +#ifdef CONFIG_MLX4_EN_VXLAN +static void mlx4_en_add_vxlan_offloads(struct work_struct *work) +{ + int ret; + struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv, + vxlan_add_task); + + ret = mlx4_config_vxlan_port(priv->mdev->dev, priv->vxlan_port); + if (ret) + goto out; + + ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port, + VXLAN_STEER_BY_OUTER_MAC, 1); +out: + if (ret) + en_err(priv, "failed setting L2 tunnel configuration ret %d\n", ret); +} + +static void mlx4_en_del_vxlan_offloads(struct work_struct *work) +{ + int ret; + struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv, + vxlan_del_task); + + ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port, + VXLAN_STEER_BY_OUTER_MAC, 0); + if (ret) + en_err(priv, "failed setting L2 tunnel configuration ret %d\n", ret); + + priv->vxlan_port = 0; +} + +static void mlx4_en_add_vxlan_port(struct net_device *dev, + sa_family_t sa_family, __be16 port) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + __be16 current_port; + + if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS)) + return; + + if (sa_family == AF_INET6) + return; + + current_port = priv->vxlan_port; + if (current_port && current_port != port) { + en_warn(priv, "vxlan port %d configured, can't add port %d\n", + ntohs(current_port), ntohs(port)); + return; + } + + priv->vxlan_port = port; + queue_work(priv->mdev->workqueue, &priv->vxlan_add_task); +} + +static void mlx4_en_del_vxlan_port(struct net_device *dev, + sa_family_t sa_family, __be16 port) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + __be16 current_port; + + if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) + return; + + if (sa_family == AF_INET6) + return; + + current_port = priv->vxlan_port; + if (current_port != port) { + en_dbg(DRV, priv, "vxlan port %d isn't configured, ignoring\n", ntohs(port)); + return; + } + + queue_work(priv->mdev->workqueue, &priv->vxlan_del_task); +} +#endif + static const struct net_device_ops mlx4_netdev_ops = { .ndo_open = mlx4_en_open, .ndo_stop = mlx4_en_close, @@ -2292,6 +2372,10 @@ static const struct net_device_ops mlx4_netdev_ops = { .ndo_busy_poll = mlx4_en_low_latency_recv, #endif .ndo_get_phys_port_id = mlx4_en_get_phys_port_id, +#ifdef CONFIG_MLX4_EN_VXLAN + .ndo_add_vxlan_port = mlx4_en_add_vxlan_port, + .ndo_del_vxlan_port = mlx4_en_del_vxlan_port, +#endif }; static const struct net_device_ops mlx4_netdev_ops_master = { @@ -2341,7 +2425,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, netif_set_real_num_rx_queues(dev, prof->rx_ring_num); SET_NETDEV_DEV(dev, &mdev->dev->pdev->dev); - dev->dev_id = port - 1; + dev->dev_port = port - 1; /* * Initialize driver private data @@ -2383,6 +2467,10 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate); INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats); INIT_DELAYED_WORK(&priv->service_task, mlx4_en_service_task); +#ifdef CONFIG_MLX4_EN_VXLAN + INIT_WORK(&priv->vxlan_add_task, mlx4_en_add_vxlan_offloads); + INIT_WORK(&priv->vxlan_del_task, mlx4_en_del_vxlan_offloads); +#endif #ifdef CONFIG_MLX4_EN_DCB if (!mlx4_is_slave(priv->mdev->dev)) { if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_SET_ETH_SCHED) { @@ -2407,7 +2495,7 @@ int mlx4_en_init_netdev(struct 
mlx4_en_dev *mdev, int port, if (mlx4_is_slave(priv->mdev->dev)) { eth_hw_addr_random(dev); en_warn(priv, "Assigned random MAC address %pM\n", dev->dev_addr); - mac_u64 = mlx4_en_mac_to_u64(dev->dev_addr); + mac_u64 = mlx4_mac_to_u64(dev->dev_addr); mdev->dev->caps.def_mac[priv->port] = mac_u64; } else { en_err(priv, "Port: %d, invalid mac burned: %pM, quiting\n", @@ -2516,7 +2604,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, } if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) { - err = mlx4_SET_PORT_VXLAN(mdev->dev, priv->port, VXLAN_STEER_BY_OUTER_MAC); + err = mlx4_SET_PORT_VXLAN(mdev->dev, priv->port, VXLAN_STEER_BY_OUTER_MAC, 1); if (err) { en_err(priv, "Failed setting port L2 tunnel configuration, err %d\n", err); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c index dae1a1f4ae55..c2cfb05e7290 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_port.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c @@ -148,10 +148,16 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset) stats->tx_packets = 0; stats->tx_bytes = 0; priv->port_stats.tx_chksum_offload = 0; + priv->port_stats.queue_stopped = 0; + priv->port_stats.wake_queue = 0; + for (i = 0; i < priv->tx_ring_num; i++) { stats->tx_packets += priv->tx_ring[i]->packets; stats->tx_bytes += priv->tx_ring[i]->bytes; priv->port_stats.tx_chksum_offload += priv->tx_ring[i]->tx_csum; + priv->port_stats.queue_stopped += + priv->tx_ring[i]->queue_stopped; + priv->port_stats.wake_queue += priv->tx_ring[i]->wake_queue; } stats->rx_errors = be64_to_cpu(mlx4_en_stats->PCS) + diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index 890922c1c8ee..ba049ae88749 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -318,6 +318,31 @@ static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv, } } +void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev) +{ + int i; + int num_of_eqs; + int num_rx_rings; + struct mlx4_dev *dev = mdev->dev; + + mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) { + if (!dev->caps.comp_pool) + num_of_eqs = max_t(int, MIN_RX_RINGS, + min_t(int, + dev->caps.num_comp_vectors, + DEF_RX_RINGS)); + else + num_of_eqs = min_t(int, MAX_MSIX_P_PORT, + dev->caps.comp_pool/ + dev->caps.num_ports) - 1; + + num_rx_rings = min_t(int, num_of_eqs, + netif_get_num_default_rss_queues()); + mdev->profile.prof[i].rx_ring_num = + rounddown_pow_of_two(num_rx_rings); + } +} + int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv, struct mlx4_en_rx_ring **pring, u32 size, u16 stride, int node) @@ -636,6 +661,9 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud if (!priv->port_up) return 0; + if (budget <= 0) + return polled; + /* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx * descriptor offset can be deduced from the CQE index instead of * reading 'cqe->index' */ diff --git a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c index c11d063473e5..03e5f6ac67e7 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c @@ -129,8 +129,10 @@ static int mlx4_en_test_speed(struct mlx4_en_priv *priv) if (mlx4_en_QUERY_PORT(priv->mdev, priv->port)) return -ENOMEM; - /* The device currently only supports 10G speed */ - if (priv->port_state.link_speed != SPEED_10000) + /* The device supports 
1G, 10G and 40G speeds */ + if (priv->port_state.link_speed != 1000 && + priv->port_state.link_speed != 10000 && + priv->port_state.link_speed != 40000) return priv->port_state.link_speed; return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index 13457032d15f..dd1f6d346459 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c @@ -44,16 +44,6 @@ #include "mlx4_en.h" -enum { - MAX_INLINE = 104, /* 128 - 16 - 4 - 4 */ - MAX_BF = 256, -}; - -static int inline_thold __read_mostly = MAX_INLINE; - -module_param_named(inline_thold, inline_thold, int, 0444); -MODULE_PARM_DESC(inline_thold, "threshold for using inline data"); - int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring **pring, int qpn, u32 size, u16 stride, int node, int queue_index) @@ -75,8 +65,7 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, ring->size = size; ring->size_mask = size - 1; ring->stride = stride; - - inline_thold = min(inline_thold, MAX_INLINE); + ring->inline_thold = priv->prof->inline_thold; tmp = size * sizeof(struct mlx4_en_tx_info); ring->tx_info = vmalloc_node(tmp, node); @@ -325,7 +314,7 @@ static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv, } } } - dev_kfree_skb(skb); + dev_kfree_skb_any(skb); return tx_info->nr_txbb; } @@ -456,7 +445,7 @@ static int mlx4_en_process_tx_cq(struct net_device *dev, */ if (netif_tx_queue_stopped(ring->tx_queue) && txbbs_skipped > 0) { netif_tx_wake_queue(ring->tx_queue); - priv->port_stats.wake_queue++; + ring->wake_queue++; } return done; } @@ -520,7 +509,7 @@ static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv, return ring->buf + index * TXBB_SIZE; } -static int is_inline(struct sk_buff *skb, void **pfrag) +static int is_inline(int inline_thold, struct sk_buff *skb, void **pfrag) { void *ptr; @@ -580,7 +569,7 @@ static int get_real_size(struct sk_buff *skb, struct net_device *dev, } } else { *lso_header_size = 0; - if (!is_inline(skb, NULL)) + if (!is_inline(priv->prof->inline_thold, skb, NULL)) real_size = CTRL_SIZE + (skb_shinfo(skb)->nr_frags + 1) * DS_SIZE; else real_size = inline_size(skb); @@ -596,7 +585,13 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *sk int spc = MLX4_INLINE_ALIGN - CTRL_SIZE - sizeof *inl; if (skb->len <= spc) { - inl->byte_count = cpu_to_be32(1 << 31 | skb->len); + if (likely(skb->len >= MIN_PKT_LEN)) { + inl->byte_count = cpu_to_be32(1 << 31 | skb->len); + } else { + inl->byte_count = cpu_to_be32(1 << 31 | MIN_PKT_LEN); + memset(((void *)(inl + 1)) + skb->len, 0, + MIN_PKT_LEN - skb->len); + } skb_copy_from_linear_data(skb, inl + 1, skb_headlen(skb)); if (skb_shinfo(skb)->nr_frags) memcpy(((void *)(inl + 1)) + skb_headlen(skb), fragptr, @@ -696,7 +691,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) ring->size - HEADROOM - MAX_DESC_TXBBS)) { /* every full Tx ring stops queue */ netif_tx_stop_queue(ring->tx_queue); - priv->port_stats.queue_stopped++; + ring->queue_stopped++; /* If queue was emptied after the if, and before the * stop_queue - need to wake the queue, or else it will remain @@ -709,7 +704,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) if (unlikely(((int)(ring->prod - ring->cons)) <= ring->size - HEADROOM - MAX_DESC_TXBBS)) { netif_tx_wake_queue(ring->tx_queue); - priv->port_stats.wake_queue++; + ring->wake_queue++; } else { return NETDEV_TX_BUSY; } @@ -747,11 +742,11 @@ 
netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) tx_info->data_offset = (void *)data - (void *)tx_desc; tx_info->linear = (lso_header_size < skb_headlen(skb) && - !is_inline(skb, NULL)) ? 1 : 0; + !is_inline(ring->inline_thold, skb, NULL)) ? 1 : 0; data += skb_shinfo(skb)->nr_frags + tx_info->linear - 1; - if (is_inline(skb, &fragptr)) { + if (is_inline(ring->inline_thold, skb, &fragptr)) { tx_info->inl = 1; } else { /* Map fragments */ @@ -881,7 +876,8 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) skb_tx_timestamp(skb); if (ring->bf_enabled && desc_size <= MAX_BF && !bounce && !vlan_tx_tag_present(skb)) { - *(__be32 *) (&tx_desc->ctrl.vlan_tag) |= cpu_to_be32(ring->doorbell_qpn); + tx_desc->ctrl.bf_qpn |= cpu_to_be32(ring->doorbell_qpn); + op_own |= htonl((bf_index & 0xffff) << 8); /* Ensure new descirptor hits memory * before setting ownership of this descriptor to HW */ diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c index 8992b38578d5..d501a2b0fb79 100644 --- a/drivers/net/ethernet/mellanox/mlx4/eq.c +++ b/drivers/net/ethernet/mellanox/mlx4/eq.c @@ -271,7 +271,10 @@ enum slave_port_state mlx4_get_slave_port_state(struct mlx4_dev *dev, int slave, { struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_slave_state *s_state = priv->mfunc.master.slave_state; - if (slave >= dev->num_slaves || port > MLX4_MAX_PORTS) { + struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave); + + if (slave >= dev->num_slaves || port > dev->caps.num_ports || + port <= 0 || !test_bit(port - 1, actv_ports.ports)) { pr_err("%s: Error: asking for slave:%d, port:%d\n", __func__, slave, port); return SLAVE_PORT_DOWN; @@ -285,8 +288,10 @@ static int mlx4_set_slave_port_state(struct mlx4_dev *dev, int slave, u8 port, { struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_slave_state *s_state = priv->mfunc.master.slave_state; + struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave); - if (slave >= dev->num_slaves || port > MLX4_MAX_PORTS || port == 0) { + if (slave >= dev->num_slaves || port > dev->caps.num_ports || + port <= 0 || !test_bit(port - 1, actv_ports.ports)) { pr_err("%s: Error: asking for slave:%d, port:%d\n", __func__, slave, port); return -1; @@ -300,9 +305,13 @@ static void set_all_slave_state(struct mlx4_dev *dev, u8 port, int event) { int i; enum slave_port_gen_event gen_event; + struct mlx4_slaves_pport slaves_pport = mlx4_phys_to_slaves_pport(dev, + port); - for (i = 0; i < dev->num_slaves; i++) - set_and_calc_slave_port_state(dev, i, port, event, &gen_event); + for (i = 0; i < dev->num_vfs + 1; i++) + if (test_bit(i, slaves_pport.slaves)) + set_and_calc_slave_port_state(dev, i, port, + event, &gen_event); } /************************************************************************** The function get as input the new event to that port, @@ -321,12 +330,14 @@ int set_and_calc_slave_port_state(struct mlx4_dev *dev, int slave, struct mlx4_slave_state *ctx = NULL; unsigned long flags; int ret = -1; + struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave); enum slave_port_state cur_state = mlx4_get_slave_port_state(dev, slave, port); *gen_event = SLAVE_PORT_GEN_EVENT_NONE; - if (slave >= dev->num_slaves || port > MLX4_MAX_PORTS || port == 0) { + if (slave >= dev->num_slaves || port > dev->caps.num_ports || + port <= 0 || !test_bit(port - 1, actv_ports.ports)) { pr_err("%s: Error: asking for slave:%d, port:%d\n", __func__, slave, port); return ret; @@ -542,15 
+553,19 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq) be64_to_cpu(eqe->event.cmd.out_param)); break; - case MLX4_EVENT_TYPE_PORT_CHANGE: + case MLX4_EVENT_TYPE_PORT_CHANGE: { + struct mlx4_slaves_pport slaves_port; port = be32_to_cpu(eqe->event.port_change.port) >> 28; + slaves_port = mlx4_phys_to_slaves_pport(dev, port); if (eqe->subtype == MLX4_PORT_CHANGE_SUBTYPE_DOWN) { mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_DOWN, port); mlx4_priv(dev)->sense.do_sense_port[port] = 1; if (!mlx4_is_master(dev)) break; - for (i = 0; i < dev->num_slaves; i++) { + for (i = 0; i < dev->num_vfs + 1; i++) { + if (!test_bit(i, slaves_port.slaves)) + continue; if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) { if (i == mlx4_master_func_num(dev)) continue; @@ -558,8 +573,13 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq) " to slave: %d, port:%d\n", __func__, i, port); s_info = &priv->mfunc.master.vf_oper[slave].vport[port].state; - if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) + if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) { + eqe->event.port_change.port = + cpu_to_be32( + (be32_to_cpu(eqe->event.port_change.port) & 0xFFFFFFF) + | (mlx4_phys_to_slave_port(dev, i, port) << 28)); mlx4_slave_event(dev, i, eqe); + } } else { /* IB port */ set_and_calc_slave_port_state(dev, i, port, MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN, @@ -580,12 +600,19 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq) if (!mlx4_is_master(dev)) break; if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) - for (i = 0; i < dev->num_slaves; i++) { + for (i = 0; i < dev->num_vfs + 1; i++) { + if (!test_bit(i, slaves_port.slaves)) + continue; if (i == mlx4_master_func_num(dev)) continue; s_info = &priv->mfunc.master.vf_oper[slave].vport[port].state; - if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) + if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) { + eqe->event.port_change.port = + cpu_to_be32( + (be32_to_cpu(eqe->event.port_change.port) & 0xFFFFFFF) + | (mlx4_phys_to_slave_port(dev, i, port) << 28)); mlx4_slave_event(dev, i, eqe); + } } else /* IB port */ /* port-up event will be sent to a slave when the @@ -594,6 +621,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq) set_all_slave_state(dev, port, MLX4_DEV_EVENT_PORT_UP); } break; + } case MLX4_EVENT_TYPE_CQ_ERROR: mlx4_warn(dev, "CQ %s on CQN %06x\n", diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c index 91b69ff4b4a2..d16a4d118903 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.c +++ b/drivers/net/ethernet/mellanox/mlx4/fw.c @@ -129,13 +129,14 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags) [0] = "RSS support", [1] = "RSS Toeplitz Hash Function support", [2] = "RSS XOR Hash Function support", - [3] = "Device manage flow steering support", + [3] = "Device managed flow steering support", [4] = "Automatic MAC reassignment support", [5] = "Time stamping support", [6] = "VST (control vlan insertion/stripping) support", [7] = "FSM (MAC anti-spoofing) support", [8] = "Dynamic QP updates support", - [9] = "TCP/IP offloads/flow-steering for VXLAN support" + [9] = "Device managed flow steering IPoIB support", + [10] = "TCP/IP offloads/flow-steering for VXLAN support" }; int i; @@ -224,13 +225,25 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave, #define QUERY_FUNC_CAP_FLAGS0_FORCE_PHY_WQE_GID 0x80 if (vhcr->op_modifier == 1) { + struct mlx4_active_ports actv_ports = + mlx4_get_active_ports(dev, slave); + int 
converted_port = mlx4_slave_convert_port( + dev, slave, vhcr->in_modifier); + + if (converted_port < 0) + return -EINVAL; + + vhcr->in_modifier = converted_port; /* Set nic_info bit to mark new fields support */ field = QUERY_FUNC_CAP_FLAGS1_NIC_INFO; MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS1_OFFSET); - field = vhcr->in_modifier; /* phys-port = logical-port */ + /* phys-port = logical-port */ + field = vhcr->in_modifier - + find_first_bit(actv_ports.ports, dev->caps.num_ports); MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_PHYS_PORT_OFFSET); + field = vhcr->in_modifier; /* size is now the QP number */ size = dev->phys_caps.base_tunnel_sqpn + 8 * slave + field - 1; MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP0_TUNNEL); @@ -248,12 +261,16 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave, QUERY_FUNC_CAP_PHYS_PORT_ID); } else if (vhcr->op_modifier == 0) { + struct mlx4_active_ports actv_ports = + mlx4_get_active_ports(dev, slave); /* enable rdma and ethernet interfaces, and new quota locations */ field = (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA | QUERY_FUNC_CAP_FLAG_QUOTAS); MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS_OFFSET); - field = dev->caps.num_ports; + field = min( + bitmap_weight(actv_ports.ports, dev->caps.num_ports), + dev->caps.num_ports); MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_NUM_PORTS_OFFSET); size = dev->caps.function_caps; /* set PF behaviours */ @@ -839,6 +856,10 @@ int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave, int err = 0; u8 field; u32 bmme_flags; + int real_port; + int slave_port; + int first_port; + struct mlx4_active_ports actv_ports; err = mlx4_cmd_box(dev, 0, outbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); @@ -851,15 +872,33 @@ int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave, MLX4_GET(flags, outbox->buf, QUERY_DEV_CAP_EXT_FLAGS_OFFSET); flags |= MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV; flags &= ~MLX4_DEV_CAP_FLAG_MEM_WINDOW; + actv_ports = mlx4_get_active_ports(dev, slave); + first_port = find_first_bit(actv_ports.ports, dev->caps.num_ports); + for (slave_port = 0, real_port = first_port; + real_port < first_port + + bitmap_weight(actv_ports.ports, dev->caps.num_ports); + ++real_port, ++slave_port) { + if (flags & (MLX4_DEV_CAP_FLAG_WOL_PORT1 << real_port)) + flags |= MLX4_DEV_CAP_FLAG_WOL_PORT1 << slave_port; + else + flags &= ~(MLX4_DEV_CAP_FLAG_WOL_PORT1 << slave_port); + } + for (; slave_port < dev->caps.num_ports; ++slave_port) + flags &= ~(MLX4_DEV_CAP_FLAG_WOL_PORT1 << slave_port); MLX4_PUT(outbox->buf, flags, QUERY_DEV_CAP_EXT_FLAGS_OFFSET); + MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_VL_PORT_OFFSET); + field &= ~0x0F; + field |= bitmap_weight(actv_ports.ports, dev->caps.num_ports) & 0x0F; + MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_VL_PORT_OFFSET); + /* For guests, disable timestamp */ MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET); field &= 0x7f; MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET); /* For guests, disable vxlan tunneling */ - MLX4_GET(field, outbox, QUERY_DEV_CAP_VXLAN); + MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_VXLAN); field &= 0xf7; MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_VXLAN); @@ -869,7 +908,7 @@ int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave, MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_BF_OFFSET); /* For guests, disable mw type 2 */ - MLX4_GET(bmme_flags, outbox, QUERY_DEV_CAP_BMME_FLAGS_OFFSET); + MLX4_GET(bmme_flags, outbox->buf, QUERY_DEV_CAP_BMME_FLAGS_OFFSET); bmme_flags 
&= ~MLX4_BMME_FLAG_TYPE_2_WIN; MLX4_PUT(outbox->buf, bmme_flags, QUERY_DEV_CAP_BMME_FLAGS_OFFSET); @@ -883,7 +922,7 @@ int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave, } /* turn off ipoib managed steering for guests */ - MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET); + MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET); field &= ~0x80; MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET); @@ -902,12 +941,20 @@ int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave, u16 short_field; int err; int admin_link_state; + int port = mlx4_slave_convert_port(dev, slave, + vhcr->in_modifier & 0xFF); #define MLX4_VF_PORT_NO_LINK_SENSE_MASK 0xE0 #define MLX4_PORT_LINK_UP_MASK 0x80 #define QUERY_PORT_CUR_MAX_PKEY_OFFSET 0x0c #define QUERY_PORT_CUR_MAX_GID_OFFSET 0x0e + if (port < 0) + return -EINVAL; + + vhcr->in_modifier = (vhcr->in_modifier & ~0xFF) | + (port & 0xFF); + err = mlx4_cmd_box(dev, 0, outbox->dma, vhcr->in_modifier, 0, MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); @@ -934,7 +981,10 @@ int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave, MLX4_PUT(outbox->buf, port_type, QUERY_PORT_SUPPORTED_TYPE_OFFSET); - short_field = 1; /* slave max gids */ + if (dev->caps.port_type[vhcr->in_modifier] == MLX4_PORT_TYPE_ETH) + short_field = mlx4_get_slave_num_gids(dev, slave, port); + else + short_field = 1; /* slave max gids */ MLX4_PUT(outbox->buf, short_field, QUERY_PORT_CUR_MAX_GID_OFFSET); @@ -1584,9 +1634,12 @@ int mlx4_INIT_PORT_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_cmd_info *cmd) { struct mlx4_priv *priv = mlx4_priv(dev); - int port = vhcr->in_modifier; + int port = mlx4_slave_convert_port(dev, slave, vhcr->in_modifier); int err; + if (port < 0) + return -EINVAL; + if (priv->mfunc.master.slave_state[slave].init_port_mask & (1 << port)) return 0; @@ -1676,9 +1729,12 @@ int mlx4_CLOSE_PORT_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_cmd_info *cmd) { struct mlx4_priv *priv = mlx4_priv(dev); - int port = vhcr->in_modifier; + int port = mlx4_slave_convert_port(dev, slave, vhcr->in_modifier); int err; + if (port < 0) + return -EINVAL; + if (!(priv->mfunc.master.slave_state[slave].init_port_mask & (1 << port))) return 0; @@ -1723,6 +1779,46 @@ int mlx4_CLOSE_HCA(struct mlx4_dev *dev, int panic) MLX4_CMD_NATIVE); } +struct mlx4_config_dev { + __be32 update_flags; + __be32 rsdv1[3]; + __be16 vxlan_udp_dport; + __be16 rsvd2; +}; + +#define MLX4_VXLAN_UDP_DPORT (1 << 0) + +static int mlx4_CONFIG_DEV(struct mlx4_dev *dev, struct mlx4_config_dev *config_dev) +{ + int err; + struct mlx4_cmd_mailbox *mailbox; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + + memcpy(mailbox->buf, config_dev, sizeof(*config_dev)); + + err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_CONFIG_DEV, + MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); + + mlx4_free_cmd_mailbox(dev, mailbox); + return err; +} + +int mlx4_config_vxlan_port(struct mlx4_dev *dev, __be16 udp_port) +{ + struct mlx4_config_dev config_dev; + + memset(&config_dev, 0, sizeof(config_dev)); + config_dev.update_flags = cpu_to_be32(MLX4_VXLAN_UDP_DPORT); + config_dev.vxlan_udp_dport = udp_port; + + return mlx4_CONFIG_DEV(dev, &config_dev); +} +EXPORT_SYMBOL_GPL(mlx4_config_vxlan_port); + + int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages) { int ret = mlx4_cmd_imm(dev, icm_size, aux_pages, 0, 0, @@ -1890,7 +1986,8 @@ void mlx4_opreq_action(struct work_struct *work) err = EINVAL; 
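/* Note on the change just below: the GET_OP_REQ acknowledgement shifts
 * the big-endian-converted firmware event token into the upper half of
 * the command's in_modifier, roughly
 *
 *	in_mod = (u32)err | (__force u32)cpu_to_be32(token) << 16;
 *
 * The added (__force u32) cast only quiets sparse's __be32-vs-u32
 * endianness warning; the generated code is unchanged.
 */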
break; } - err = mlx4_cmd(dev, 0, ((u32) err | cpu_to_be32(token) << 16), + err = mlx4_cmd(dev, 0, ((u32) err | + (__force u32)cpu_to_be32(token) << 16), 1, MLX4_CMD_GET_OP_REQ, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); if (err) { diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index d711158b0d4b..f0ae95f66ceb 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -41,7 +41,6 @@ #include <linux/slab.h> #include <linux/io-mapping.h> #include <linux/delay.h> -#include <linux/netdevice.h> #include <linux/kmod.h> #include <linux/mlx4/device.h> @@ -78,13 +77,17 @@ MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero"); #endif /* CONFIG_PCI_MSI */ -static int num_vfs; -module_param(num_vfs, int, 0444); -MODULE_PARM_DESC(num_vfs, "enable #num_vfs functions if num_vfs > 0"); +static uint8_t num_vfs[3] = {0, 0, 0}; +static int num_vfs_argc = 3; +module_param_array(num_vfs, byte , &num_vfs_argc, 0444); +MODULE_PARM_DESC(num_vfs, "enable #num_vfs functions if num_vfs > 0\n" + "num_vfs=port1,port2,port1+2"); -static int probe_vf; -module_param(probe_vf, int, 0644); -MODULE_PARM_DESC(probe_vf, "number of vfs to probe by pf driver (num_vfs > 0)"); +static uint8_t probe_vf[3] = {0, 0, 0}; +static int probe_vfs_argc = 3; +module_param_array(probe_vf, byte, &probe_vfs_argc, 0444); +MODULE_PARM_DESC(probe_vf, "number of vfs to probe by pf driver (num_vfs > 0)\n" + "probe_vf=port1,port2,port1+2"); int mlx4_log_num_mgm_entry_size = MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE; module_param_named(log_num_mgm_entry_size, @@ -150,6 +153,8 @@ struct mlx4_port_config { struct pci_dev *pdev; }; +static atomic_t pf_loading = ATOMIC_INIT(0); + int mlx4_check_port_params(struct mlx4_dev *dev, enum mlx4_port_type *port_type) { @@ -749,7 +754,7 @@ static void mlx4_request_modules(struct mlx4_dev *dev) has_eth_port = true; } - if (has_ib_port) + if (has_ib_port || (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE)) request_module_nowait(IB_DRV_NAME); if (has_eth_port) request_module_nowait(EN_DRV_NAME); @@ -1407,6 +1412,11 @@ static int mlx4_init_slave(struct mlx4_dev *dev) u32 slave_read; u32 cmd_channel_ver; + if (atomic_read(&pf_loading)) { + mlx4_warn(dev, "PF is not ready. 
Deferring probe\n"); + return -EPROBE_DEFER; + } + mutex_lock(&priv->cmd.slave_cmd_mutex); priv->cmd.max_cmds = 1; mlx4_warn(dev, "Sending reset\n"); @@ -1463,7 +1473,11 @@ static void mlx4_parav_master_pf_caps(struct mlx4_dev *dev) int i; for (i = 1; i <= dev->caps.num_ports; i++) { - dev->caps.gid_table_len[i] = 1; + if (dev->caps.port_type[i] == MLX4_PORT_TYPE_ETH) + dev->caps.gid_table_len[i] = + mlx4_get_slave_num_gids(dev, 0, i); + else + dev->caps.gid_table_len[i] = 1; dev->caps.pkey_table_len[i] = dev->phys_caps.pkey_phys_table_len[i] - 1; } @@ -1488,7 +1502,7 @@ static void choose_steering_mode(struct mlx4_dev *dev, if (mlx4_log_num_mgm_entry_size == -1 && dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_FS_EN && (!mlx4_is_mfunc(dev) || - (dev_cap->fs_max_num_qp_per_entry >= (num_vfs + 1))) && + (dev_cap->fs_max_num_qp_per_entry >= (dev->num_vfs + 1))) && choose_log_fs_mgm_entry_size(dev_cap->fs_max_num_qp_per_entry) >= MLX4_MIN_MGM_LOG_ENTRY_SIZE) { dev->oper_log_mgm_entry_size = @@ -1974,9 +1988,8 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev) struct mlx4_priv *priv = mlx4_priv(dev); struct msix_entry *entries; int nreq = min_t(int, dev->caps.num_ports * - min_t(int, netif_get_num_default_rss_queues() + 1, + min_t(int, num_online_cpus() + 1, MAX_MSIX_P_PORT) + MSIX_LEGACY_SZ, MAX_MSIX); - int err; int i; if (msi_x) { @@ -1990,23 +2003,13 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev) for (i = 0; i < nreq; ++i) entries[i].entry = i; - retry: - err = pci_enable_msix(dev->pdev, entries, nreq); - if (err) { - /* Try again if at least 2 vectors are available */ - if (err > 1) { - mlx4_info(dev, "Requested %d vectors, " - "but only %d MSI-X vectors available, " - "trying again\n", nreq, err); - nreq = err; - goto retry; - } + nreq = pci_enable_msix_range(dev->pdev, entries, 2, nreq); + + if (nreq < 0) { kfree(entries); goto no_msi; - } - - if (nreq < - MSIX_LEGACY_SZ + dev->caps.num_ports * MIN_MSIX_P_PORT) { + } else if (nreq < MSIX_LEGACY_SZ + + dev->caps.num_ports * MIN_MSIX_P_PORT) { /*Working in legacy mode , all EQ's shared*/ dev->caps.comp_pool = 0; dev->caps.num_comp_vectors = nreq - 1; @@ -2194,6 +2197,13 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data) struct mlx4_dev *dev; int err; int port; + int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0}; + int prb_vf[MLX4_MAX_PORTS + 1] = {0, 0, 0}; + const int param_map[MLX4_MAX_PORTS + 1][MLX4_MAX_PORTS + 1] = { + {2, 0, 0}, {0, 1, 2}, {0, 1, 2} }; + unsigned total_vfs = 0; + int sriov_initialized = 0; + unsigned int i; pr_info(DRV_NAME ": Initializing %s\n", pci_name(pdev)); @@ -2208,17 +2218,40 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data) * per port, we must limit the number of VFs to 63 (since their are * 128 MACs) */ - if (num_vfs >= MLX4_MAX_NUM_VF) { + for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]) && i < num_vfs_argc; + total_vfs += nvfs[param_map[num_vfs_argc - 1][i]], i++) { + nvfs[param_map[num_vfs_argc - 1][i]] = num_vfs[i]; + if (nvfs[i] < 0) { + dev_err(&pdev->dev, "num_vfs module parameter cannot be negative\n"); + return -EINVAL; + } + } + for (i = 0; i < sizeof(prb_vf)/sizeof(prb_vf[0]) && i < probe_vfs_argc; + i++) { + prb_vf[param_map[probe_vfs_argc - 1][i]] = probe_vf[i]; + if (prb_vf[i] < 0 || prb_vf[i] > nvfs[i]) { + dev_err(&pdev->dev, "probe_vf module parameter cannot be negative or greater than num_vfs\n"); + return -EINVAL; + } + } + if (total_vfs >= MLX4_MAX_NUM_VF) { dev_err(&pdev->dev, "Requested more VF's (%d) than allowed (%d)\n", - num_vfs, MLX4_MAX_NUM_VF - 1); + 
total_vfs, MLX4_MAX_NUM_VF - 1); return -EINVAL; } - if (num_vfs < 0) { - pr_err("num_vfs module parameter cannot be negative\n"); - return -EINVAL; + for (i = 0; i < MLX4_MAX_PORTS; i++) { + if (nvfs[i] + nvfs[2] >= MLX4_MAX_NUM_VF_P_PORT) { + dev_err(&pdev->dev, + "Requested more VF's (%d) for port (%d) than allowed (%d)\n", + nvfs[i] + nvfs[2], i + 1, + MLX4_MAX_NUM_VF_P_PORT - 1); + return -EINVAL; + } } + + /* * Check for BARs. */ @@ -2293,11 +2326,23 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data) if (pci_dev_data & MLX4_PCI_DEV_IS_VF) { /* When acting as pf, we normally skip vfs unless explicitly * requested to probe them. */ - if (num_vfs && extended_func_num(pdev) > probe_vf) { - mlx4_warn(dev, "Skipping virtual function:%d\n", - extended_func_num(pdev)); - err = -ENODEV; - goto err_free_dev; + if (total_vfs) { + unsigned vfs_offset = 0; + for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]) && + vfs_offset + nvfs[i] < extended_func_num(pdev); + vfs_offset += nvfs[i], i++) + ; + if (i == sizeof(nvfs)/sizeof(nvfs[0])) { + err = -ENODEV; + goto err_free_dev; + } + if ((extended_func_num(pdev) - vfs_offset) + > prb_vf[i]) { + mlx4_warn(dev, "Skipping virtual function:%d\n", + extended_func_num(pdev)); + err = -ENODEV; + goto err_free_dev; + } } mlx4_warn(dev, "Detected virtual function - running in slave mode\n"); dev->flags |= MLX4_FLAG_SLAVE; @@ -2317,18 +2362,30 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data) } } - if (num_vfs) { - mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n", num_vfs); - err = pci_enable_sriov(pdev, num_vfs); - if (err) { - mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d).\n", - err); + if (total_vfs) { + mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n", + total_vfs); + dev->dev_vfs = kzalloc( + total_vfs * sizeof(*dev->dev_vfs), + GFP_KERNEL); + if (NULL == dev->dev_vfs) { + mlx4_err(dev, "Failed to allocate memory for VFs\n"); err = 0; } else { - mlx4_warn(dev, "Running in master mode\n"); - dev->flags |= MLX4_FLAG_SRIOV | - MLX4_FLAG_MASTER; - dev->num_vfs = num_vfs; + atomic_inc(&pf_loading); + err = pci_enable_sriov(pdev, total_vfs); + atomic_dec(&pf_loading); + if (err) { + mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d).\n", + err); + err = 0; + } else { + mlx4_warn(dev, "Running in master mode\n"); + dev->flags |= MLX4_FLAG_SRIOV | + MLX4_FLAG_MASTER; + dev->num_vfs = total_vfs; + sriov_initialized = 1; + } } } @@ -2393,12 +2450,37 @@ slave_start: /* In master functions, the communication channel must be initialized * after obtaining its address from fw */ if (mlx4_is_master(dev)) { + unsigned sum = 0; err = mlx4_multi_func_init(dev); if (err) { mlx4_err(dev, "Failed to init master mfunc" "interface, aborting.\n"); goto err_close; } + if (sriov_initialized) { + int ib_ports = 0; + mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB) + ib_ports++; + + if (ib_ports && + (num_vfs_argc > 1 || probe_vfs_argc > 1)) { + mlx4_err(dev, + "Invalid syntax of num_vfs/probe_vfs " + "with IB port. Single port VFs syntax" + " is only supported when all ports " + "are configured as ethernet\n"); + goto err_close; + } + for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]); i++) { + unsigned j; + for (j = 0; j < nvfs[i]; ++sum, ++j) { + dev->dev_vfs[sum].min_port = + i < 2 ? i + 1 : 1; + dev->dev_vfs[sum].n_ports = i < 2 ? 
1 : + dev->caps.num_ports; + } + } + } } err = mlx4_alloc_eq_table(dev); @@ -2506,6 +2588,8 @@ err_rel_own: if (!mlx4_is_slave(dev)) mlx4_free_ownership(dev); + kfree(priv->dev.dev_vfs); + err_free_dev: kfree(priv); @@ -2592,6 +2676,7 @@ static void mlx4_remove_one(struct pci_dev *pdev) kfree(dev->caps.qp0_proxy); kfree(dev->caps.qp1_tunnel); kfree(dev->caps.qp1_proxy); + kfree(dev->dev_vfs); kfree(priv); pci_release_regions(pdev); @@ -2670,7 +2755,11 @@ static pci_ers_result_t mlx4_pci_err_detected(struct pci_dev *pdev, static pci_ers_result_t mlx4_pci_slot_reset(struct pci_dev *pdev) { - int ret = __mlx4_init_one(pdev, 0); + const struct pci_device_id *id; + int ret; + + id = pci_match_id(mlx4_pci_table, pdev); + ret = __mlx4_init_one(pdev, id->driver_data); return ret ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED; } @@ -2684,6 +2773,7 @@ static struct pci_driver mlx4_driver = { .name = DRV_NAME, .id_table = mlx4_pci_table, .probe = mlx4_init_one, + .shutdown = mlx4_remove_one, .remove = mlx4_remove_one, .err_handler = &mlx4_err_handler, }; diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c index db7dc0b6667d..80ccb4edf825 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mcg.c +++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c @@ -1387,9 +1387,12 @@ int mlx4_PROMISC_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_cmd_info *cmd) { u32 qpn = (u32) vhcr->in_param & 0xffffffff; - u8 port = vhcr->in_param >> 62; + int port = mlx4_slave_convert_port(dev, slave, vhcr->in_param >> 62); enum mlx4_steer_type steer = vhcr->in_modifier; + if (port < 0) + return -EINVAL; + /* Promiscuous unicast is not allowed in mfunc */ if (mlx4_is_mfunc(dev) && steer == MLX4_UC_STEER) return 0; diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h index 7aec6c833973..cf8be41abb36 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h @@ -788,6 +788,10 @@ enum { MLX4_USE_RR = 1, }; +struct mlx4_roce_gid_entry { + u8 raw[16]; +}; + struct mlx4_priv { struct mlx4_dev dev; @@ -834,6 +838,7 @@ struct mlx4_priv { int fs_hash_mode; u8 virt2phys_pkey[MLX4_MFUNC_MAX][MLX4_MAX_PORTS][MLX4_MAX_PORT_PKEYS]; __be64 slave_node_guids[MLX4_MFUNC_MAX]; + struct mlx4_roce_gid_entry roce_gids[MLX4_MAX_PORTS][MLX4_ROCE_MAX_GIDS]; atomic_t opreq_count; struct work_struct opreq_task; @@ -1242,11 +1247,6 @@ int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_cmd_mailbox *inbox, struct mlx4_cmd_mailbox *outbox, struct mlx4_cmd_info *cmd); -int mlx4_FLOW_STEERING_IB_UC_QP_RANGE_wrapper(struct mlx4_dev *dev, int slave, - struct mlx4_vhcr *vhcr, - struct mlx4_cmd_mailbox *inbox, - struct mlx4_cmd_mailbox *outbox, - struct mlx4_cmd_info *cmd); int mlx4_get_mgm_entry_size(struct mlx4_dev *dev); int mlx4_get_qp_per_mgm(struct mlx4_dev *dev); @@ -1282,4 +1282,8 @@ void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work); void mlx4_init_quotas(struct mlx4_dev *dev); +int mlx4_get_slave_num_gids(struct mlx4_dev *dev, int slave, int port); +/* Returns the VF index of slave */ +int mlx4_get_vf_indx(struct mlx4_dev *dev, int slave); + #endif /* MLX4_H */ diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index b57e8c87a34e..7a733c287744 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h @@ -187,6 +187,13 @@ enum { #define GET_AVG_PERF_COUNTER(cnt) (0) 
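/* Note: the TX-flow enum added just after the #endif below centralizes
 * the inline constants that en_tx.c and en_main.c now share: an
 * out-of-range inline_thold module parameter is reset to the MAX_INLINE
 * default by mlx4_en_verify_params(), and build_inline_wqe() zero-pads
 * inline frames shorter than MIN_PKT_LEN. A hedged sketch of that
 * padding rule (names illustrative, not the kernel code):
 *
 *	// byte_count's top bit flags an inline segment; assume len <= 104
 *	uint32_t count = len >= 17 ? len : 17;	// 17 == MIN_PKT_LEN
 *	inl->byte_count = cpu_to_be32(1u << 31 | count);
 *	memcpy(inl + 1, data, len);
 *	memset((uint8_t *)(inl + 1) + len, 0, count - len); // zero the pad
 */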
#endif /* MLX4_EN_PERF_STAT */ +/* Constants for TX flow */ +enum { + MAX_INLINE = 104, /* 128 - 16 - 4 - 4 */ + MAX_BF = 256, + MIN_PKT_LEN = 17, +}; + /* * Configurables */ @@ -267,10 +274,13 @@ struct mlx4_en_tx_ring { unsigned long bytes; unsigned long packets; unsigned long tx_csum; + unsigned long queue_stopped; + unsigned long wake_queue; struct mlx4_bf bf; bool bf_enabled; struct netdev_queue *tx_queue; int hwtstamp_tx_type; + int inline_thold; }; struct mlx4_en_rx_desc { @@ -346,6 +356,7 @@ struct mlx4_en_port_profile { u8 tx_pause; u8 tx_ppp; int rss_rings; + int inline_thold; }; struct mlx4_en_profile { @@ -548,6 +559,10 @@ struct mlx4_en_priv { struct work_struct linkstate_task; struct delayed_work stats_task; struct delayed_work service_task; +#ifdef CONFIG_MLX4_EN_VXLAN + struct work_struct vxlan_add_task; + struct work_struct vxlan_del_task; +#endif struct mlx4_en_perf_stats pstats; struct mlx4_en_pkt_stats pkstats; struct mlx4_en_port_stats port_stats; @@ -574,6 +589,7 @@ struct mlx4_en_priv { struct hlist_head filter_hash[1 << MLX4_EN_FILTER_HASH_SHIFT]; #endif u64 tunnel_reg_id; + __be16 vxlan_port; }; enum mlx4_en_wol { @@ -737,7 +753,7 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv, int cq, int user_prio); void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring); - +void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev); int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv, struct mlx4_en_rx_ring **pring, u32 size, u16 stride, int node); @@ -786,7 +802,6 @@ void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv); #define MLX4_EN_NUM_SELF_TEST 5 void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf); -u64 mlx4_en_mac_to_u64(u8 *addr); void mlx4_en_ptp_overflow_check(struct mlx4_en_dev *mdev); /* diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c index a58bcbf1b806..cfcad26ed40f 100644 --- a/drivers/net/ethernet/mellanox/mlx4/port.c +++ b/drivers/net/ethernet/mellanox/mlx4/port.c @@ -505,6 +505,84 @@ int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps) mlx4_free_cmd_mailbox(dev, outmailbox); return err; } +static struct mlx4_roce_gid_entry zgid_entry; + +int mlx4_get_slave_num_gids(struct mlx4_dev *dev, int slave, int port) +{ + int vfs; + int slave_gid = slave; + unsigned i; + struct mlx4_slaves_pport slaves_pport; + struct mlx4_active_ports actv_ports; + unsigned max_port_p_one; + + if (slave == 0) + return MLX4_ROCE_PF_GIDS; + + /* Slave is a VF */ + slaves_pport = mlx4_phys_to_slaves_pport(dev, port); + actv_ports = mlx4_get_active_ports(dev, slave); + max_port_p_one = find_first_bit(actv_ports.ports, dev->caps.num_ports) + + bitmap_weight(actv_ports.ports, dev->caps.num_ports) + 1; + + for (i = 1; i < max_port_p_one; i++) { + struct mlx4_active_ports exclusive_ports; + struct mlx4_slaves_pport slaves_pport_actv; + bitmap_zero(exclusive_ports.ports, dev->caps.num_ports); + set_bit(i - 1, exclusive_ports.ports); + if (i == port) + continue; + slaves_pport_actv = mlx4_phys_to_slaves_pport_actv( + dev, &exclusive_ports); + slave_gid -= bitmap_weight(slaves_pport_actv.slaves, + dev->num_vfs + 1); + } + vfs = bitmap_weight(slaves_pport.slaves, dev->num_vfs + 1) - 1; + if (slave_gid <= ((MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS) % vfs)) + return ((MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS) / vfs) + 1; + return (MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS) / vfs; +} + +int mlx4_get_base_gid_ix(struct mlx4_dev *dev, int slave, int port) +{ + int gids; + unsigned 
i; + int slave_gid = slave; + int vfs; + + struct mlx4_slaves_pport slaves_pport; + struct mlx4_active_ports actv_ports; + unsigned max_port_p_one; + + if (slave == 0) + return 0; + + slaves_pport = mlx4_phys_to_slaves_pport(dev, port); + actv_ports = mlx4_get_active_ports(dev, slave); + max_port_p_one = find_first_bit(actv_ports.ports, dev->caps.num_ports) + + bitmap_weight(actv_ports.ports, dev->caps.num_ports) + 1; + + for (i = 1; i < max_port_p_one; i++) { + struct mlx4_active_ports exclusive_ports; + struct mlx4_slaves_pport slaves_pport_actv; + bitmap_zero(exclusive_ports.ports, dev->caps.num_ports); + set_bit(i - 1, exclusive_ports.ports); + if (i == port) + continue; + slaves_pport_actv = mlx4_phys_to_slaves_pport_actv( + dev, &exclusive_ports); + slave_gid -= bitmap_weight(slaves_pport_actv.slaves, + dev->num_vfs + 1); + } + gids = MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS; + vfs = bitmap_weight(slaves_pport.slaves, dev->num_vfs + 1) - 1; + if (slave_gid <= gids % vfs) + return MLX4_ROCE_PF_GIDS + ((gids / vfs) + 1) * (slave_gid - 1); + + return MLX4_ROCE_PF_GIDS + (gids % vfs) + + ((gids / vfs) * (slave_gid - 1)); +} +EXPORT_SYMBOL_GPL(mlx4_get_base_gid_ix); static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod, u8 op_mod, struct mlx4_cmd_mailbox *inbox) @@ -515,14 +593,18 @@ static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod, struct mlx4_slave_state *slave_st = &master->slave_state[slave]; struct mlx4_set_port_rqp_calc_context *qpn_context; struct mlx4_set_port_general_context *gen_context; + struct mlx4_roce_gid_entry *gid_entry_tbl, *gid_entry_mbox, *gid_entry_mb1; int reset_qkey_viols; int port; int is_eth; + int num_gids; + int base; u32 in_modifier; u32 promisc; u16 mtu, prev_mtu; int err; - int i; + int i, j; + int offset; __be32 agg_cap_mask; __be32 slave_cap_mask; __be32 new_cap_mask; @@ -535,7 +617,8 @@ static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod, /* Slaves cannot perform SET_PORT operations except changing MTU */ if (is_eth) { if (slave != dev->caps.function && - in_modifier != MLX4_SET_PORT_GENERAL) { + in_modifier != MLX4_SET_PORT_GENERAL && + in_modifier != MLX4_SET_PORT_GID_TABLE) { mlx4_warn(dev, "denying SET_PORT for slave:%d\n", slave); return -EINVAL; @@ -581,6 +664,67 @@ static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod, gen_context->mtu = cpu_to_be16(master->max_mtu[port]); break; + case MLX4_SET_PORT_GID_TABLE: + /* change to MULTIPLE entries: number of guest's gids + * need a FOR-loop here over number of gids the guest has. + * 1. Check no duplicates in gids passed by slave + */ + num_gids = mlx4_get_slave_num_gids(dev, slave, port); + base = mlx4_get_base_gid_ix(dev, slave, port); + gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf); + for (i = 0; i < num_gids; gid_entry_mbox++, i++) { + if (!memcmp(gid_entry_mbox->raw, zgid_entry.raw, + sizeof(zgid_entry))) + continue; + gid_entry_mb1 = gid_entry_mbox + 1; + for (j = i + 1; j < num_gids; gid_entry_mb1++, j++) { + if (!memcmp(gid_entry_mb1->raw, + zgid_entry.raw, sizeof(zgid_entry))) + continue; + if (!memcmp(gid_entry_mb1->raw, gid_entry_mbox->raw, + sizeof(gid_entry_mbox->raw))) { + /* found duplicate */ + return -EINVAL; + } + } + } + + /* 2. 
Check that do not have duplicates in OTHER + * entries in the port GID table + */ + for (i = 0; i < MLX4_ROCE_MAX_GIDS; i++) { + if (i >= base && i < base + num_gids) + continue; /* don't compare to slave's current gids */ + gid_entry_tbl = &priv->roce_gids[port - 1][i]; + if (!memcmp(gid_entry_tbl->raw, zgid_entry.raw, sizeof(zgid_entry))) + continue; + gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf); + for (j = 0; j < num_gids; gid_entry_mbox++, j++) { + if (!memcmp(gid_entry_mbox->raw, zgid_entry.raw, + sizeof(zgid_entry))) + continue; + if (!memcmp(gid_entry_mbox->raw, gid_entry_tbl->raw, + sizeof(gid_entry_tbl->raw))) { + /* found duplicate */ + mlx4_warn(dev, "requested gid entry for slave:%d " + "is a duplicate of gid at index %d\n", + slave, i); + return -EINVAL; + } + } + } + + /* insert slave GIDs with memcpy, starting at slave's base index */ + gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf); + for (i = 0, offset = base; i < num_gids; gid_entry_mbox++, offset++, i++) + memcpy(priv->roce_gids[port - 1][offset].raw, gid_entry_mbox->raw, 16); + + /* Now, copy roce port gids table to current mailbox for passing to FW */ + gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf); + for (i = 0; i < MLX4_ROCE_MAX_GIDS; gid_entry_mbox++, i++) + memcpy(gid_entry_mbox->raw, priv->roce_gids[port - 1][i].raw, 16); + + break; } return mlx4_cmd(dev, inbox->dma, in_mod, op_mod, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B, @@ -646,6 +790,15 @@ int mlx4_SET_PORT_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_cmd_mailbox *outbox, struct mlx4_cmd_info *cmd) { + int port = mlx4_slave_convert_port( + dev, slave, vhcr->in_modifier & 0xFF); + + if (port < 0) + return -EINVAL; + + vhcr->in_modifier = (vhcr->in_modifier & ~0xFF) | + (port & 0xFF); + return mlx4_common_set_port(dev, slave, vhcr->in_modifier, vhcr->op_modifier, inbox); } @@ -835,7 +988,7 @@ struct mlx4_set_port_vxlan_context { u8 steering; }; -int mlx4_SET_PORT_VXLAN(struct mlx4_dev *dev, u8 port, u8 steering) +int mlx4_SET_PORT_VXLAN(struct mlx4_dev *dev, u8 port, u8 steering, int enable) { int err; u32 in_mod; @@ -849,7 +1002,8 @@ int mlx4_SET_PORT_VXLAN(struct mlx4_dev *dev, u8 port, u8 steering) memset(context, 0, sizeof(*context)); context->modify_flags = VXLAN_ENABLE_MODIFY | VXLAN_STEERING_MODIFY; - context->enable_flags = VXLAN_ENABLE; + if (enable) + context->enable_flags = VXLAN_ENABLE; context->steering = steering; in_mod = MLX4_SET_PORT_VXLAN << 8 | port; @@ -927,3 +1081,108 @@ void mlx4_set_stats_bitmap(struct mlx4_dev *dev, u64 *stats_bitmap) *stats_bitmap |= MLX4_STATS_ERROR_COUNTERS_MASK; } EXPORT_SYMBOL(mlx4_set_stats_bitmap); + +int mlx4_get_slave_from_roce_gid(struct mlx4_dev *dev, int port, u8 *gid, + int *slave_id) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + int i, found_ix = -1; + int vf_gids = MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS; + struct mlx4_slaves_pport slaves_pport; + unsigned num_vfs; + int slave_gid; + + if (!mlx4_is_mfunc(dev)) + return -EINVAL; + + slaves_pport = mlx4_phys_to_slaves_pport(dev, port); + num_vfs = bitmap_weight(slaves_pport.slaves, dev->num_vfs + 1) - 1; + + for (i = 0; i < MLX4_ROCE_MAX_GIDS; i++) { + if (!memcmp(priv->roce_gids[port - 1][i].raw, gid, 16)) { + found_ix = i; + break; + } + } + + if (found_ix >= 0) { + if (found_ix < MLX4_ROCE_PF_GIDS) + slave_gid = 0; + else if (found_ix < MLX4_ROCE_PF_GIDS + (vf_gids % num_vfs) * + (vf_gids / num_vfs + 1)) + slave_gid = ((found_ix - MLX4_ROCE_PF_GIDS) / + (vf_gids / num_vfs + 1)) + 1; + else + slave_gid = 
+ ((found_ix - MLX4_ROCE_PF_GIDS - + ((vf_gids % num_vfs) * ((vf_gids / num_vfs + 1)))) / + (vf_gids / num_vfs)) + vf_gids % num_vfs + 1; + + if (slave_gid) { + struct mlx4_active_ports exclusive_ports; + struct mlx4_active_ports actv_ports; + struct mlx4_slaves_pport slaves_pport_actv; + unsigned max_port_p_one; + int num_slaves_before = 1; + + for (i = 1; i < port; i++) { + bitmap_zero(exclusive_ports.ports, dev->caps.num_ports); + set_bit(i, exclusive_ports.ports); + slaves_pport_actv = + mlx4_phys_to_slaves_pport_actv( + dev, &exclusive_ports); + num_slaves_before += bitmap_weight( + slaves_pport_actv.slaves, + dev->num_vfs + 1); + } + + if (slave_gid < num_slaves_before) { + bitmap_zero(exclusive_ports.ports, dev->caps.num_ports); + set_bit(port - 1, exclusive_ports.ports); + slaves_pport_actv = + mlx4_phys_to_slaves_pport_actv( + dev, &exclusive_ports); + slave_gid += bitmap_weight( + slaves_pport_actv.slaves, + dev->num_vfs + 1) - + num_slaves_before; + } + actv_ports = mlx4_get_active_ports(dev, slave_gid); + max_port_p_one = find_first_bit( + actv_ports.ports, dev->caps.num_ports) + + bitmap_weight(actv_ports.ports, + dev->caps.num_ports) + 1; + + for (i = 1; i < max_port_p_one; i++) { + if (i == port) + continue; + bitmap_zero(exclusive_ports.ports, + dev->caps.num_ports); + set_bit(i - 1, exclusive_ports.ports); + slaves_pport_actv = + mlx4_phys_to_slaves_pport_actv( + dev, &exclusive_ports); + slave_gid += bitmap_weight( + slaves_pport_actv.slaves, + dev->num_vfs + 1); + } + } + *slave_id = slave_gid; + } + + return (found_ix >= 0) ? 0 : -EINVAL; +} +EXPORT_SYMBOL(mlx4_get_slave_from_roce_gid); + +int mlx4_get_roce_gid_from_slave(struct mlx4_dev *dev, int port, int slave_id, + u8 *gid) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + + if (!mlx4_is_master(dev)) + return -EINVAL; + + memcpy(gid, priv->roce_gids[port - 1][slave_id].raw, 16); + return 0; +} +EXPORT_SYMBOL(mlx4_get_roce_gid_from_slave); diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index 57428a0cb9dd..3b5f53ef29b2 100644 --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c @@ -52,6 +52,8 @@ struct mac_res { struct list_head list; u64 mac; + int ref_count; + u8 smac_index; u8 port; }; @@ -219,6 +221,11 @@ struct res_fs_rule { int qpn; }; +static int mlx4_is_eth(struct mlx4_dev *dev, int port) +{ + return dev->caps.port_mask[port] == MLX4_PORT_TYPE_IB ? 0 : 1; +} + static void *res_tracker_lookup(struct rb_root *root, u64 res_id) { struct rb_node *node = root->rb_node; @@ -461,6 +468,8 @@ int mlx4_init_resource_tracker(struct mlx4_dev *dev) spin_lock_init(&res_alloc->alloc_lock); for (t = 0; t < dev->num_vfs + 1; t++) { + struct mlx4_active_ports actv_ports = + mlx4_get_active_ports(dev, t); switch (i) { case RES_QP: initialize_res_quotas(dev, res_alloc, RES_QP, @@ -490,10 +499,27 @@ int mlx4_init_resource_tracker(struct mlx4_dev *dev) break; case RES_MAC: if (t == mlx4_master_func_num(dev)) { - res_alloc->quota[t] = MLX4_MAX_MAC_NUM; + int max_vfs_pport = 0; + /* Calculate the max vfs per port for */ + /* both ports. 
*/ + for (j = 0; j < dev->caps.num_ports; + j++) { + struct mlx4_slaves_pport slaves_pport = + mlx4_phys_to_slaves_pport(dev, j + 1); + unsigned current_slaves = + bitmap_weight(slaves_pport.slaves, + dev->caps.num_ports) - 1; + if (max_vfs_pport < current_slaves) + max_vfs_pport = + current_slaves; + } + res_alloc->quota[t] = + MLX4_MAX_MAC_NUM - + 2 * max_vfs_pport; res_alloc->guaranteed[t] = 2; for (j = 0; j < MLX4_MAX_PORTS; j++) - res_alloc->res_port_free[j] = MLX4_MAX_MAC_NUM; + res_alloc->res_port_free[j] = + MLX4_MAX_MAC_NUM; } else { res_alloc->quota[t] = MLX4_MAX_MAC_NUM; res_alloc->guaranteed[t] = 2; @@ -521,9 +547,10 @@ int mlx4_init_resource_tracker(struct mlx4_dev *dev) break; } if (i == RES_MAC || i == RES_VLAN) { - for (j = 0; j < MLX4_MAX_PORTS; j++) - res_alloc->res_port_rsvd[j] += - res_alloc->guaranteed[t]; + for (j = 0; j < dev->caps.num_ports; j++) + if (test_bit(j, actv_ports.ports)) + res_alloc->res_port_rsvd[j] += + res_alloc->guaranteed[t]; } else { res_alloc->res_reserved += res_alloc->guaranteed[t]; } @@ -600,15 +627,37 @@ static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox, struct mlx4_qp_context *qp_ctx = inbox->buf + 8; enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *) inbox->buf); u32 ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff; + int port; - if (MLX4_QP_ST_UD == ts) - qp_ctx->pri_path.mgid_index = 0x80 | slave; - - if (MLX4_QP_ST_RC == ts || MLX4_QP_ST_UC == ts) { - if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) - qp_ctx->pri_path.mgid_index = slave & 0x7F; - if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) - qp_ctx->alt_path.mgid_index = slave & 0x7F; + if (MLX4_QP_ST_UD == ts) { + port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1; + if (mlx4_is_eth(dev, port)) + qp_ctx->pri_path.mgid_index = + mlx4_get_base_gid_ix(dev, slave, port) | 0x80; + else + qp_ctx->pri_path.mgid_index = slave | 0x80; + + } else if (MLX4_QP_ST_RC == ts || MLX4_QP_ST_XRC == ts || MLX4_QP_ST_UC == ts) { + if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) { + port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1; + if (mlx4_is_eth(dev, port)) { + qp_ctx->pri_path.mgid_index += + mlx4_get_base_gid_ix(dev, slave, port); + qp_ctx->pri_path.mgid_index &= 0x7f; + } else { + qp_ctx->pri_path.mgid_index = slave & 0x7F; + } + } + if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) { + port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1; + if (mlx4_is_eth(dev, port)) { + qp_ctx->alt_path.mgid_index += + mlx4_get_base_gid_ix(dev, slave, port); + qp_ctx->alt_path.mgid_index &= 0x7f; + } else { + qp_ctx->alt_path.mgid_index = slave & 0x7F; + } + } } } @@ -619,7 +668,6 @@ static int update_vport_qp_param(struct mlx4_dev *dev, struct mlx4_qp_context *qpc = inbox->buf + 8; struct mlx4_vport_oper_state *vp_oper; struct mlx4_priv *priv; - u32 qp_type; int port; port = (qpc->pri_path.sched_queue & 0x40) ? 
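	/* bit 6 of sched_queue selects the physical port */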
2 : 1; @@ -627,12 +675,6 @@ static int update_vport_qp_param(struct mlx4_dev *dev, vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port]; if (MLX4_VGT != vp_oper->state.default_vlan) { - qp_type = (be32_to_cpu(qpc->flags) >> 16) & 0xff; - if (MLX4_QP_ST_RC == qp_type || - (MLX4_QP_ST_UD == qp_type && - !mlx4_is_qp_reserved(dev, qpn))) - return -EINVAL; - /* the reserved QPs (special, proxy, tunnel) * do not operate over vlans */ @@ -1659,11 +1701,39 @@ static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, return err; } -static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port) +static int mac_find_smac_ix_in_slave(struct mlx4_dev *dev, int slave, int port, + u8 smac_index, u64 *mac) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; + struct list_head *mac_list = + &tracker->slave_list[slave].res_list[RES_MAC]; + struct mac_res *res, *tmp; + + list_for_each_entry_safe(res, tmp, mac_list, list) { + if (res->smac_index == smac_index && res->port == (u8) port) { + *mac = res->mac; + return 0; + } + } + return -ENOENT; +} + +static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port, u8 smac_index) { struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; - struct mac_res *res; + struct list_head *mac_list = + &tracker->slave_list[slave].res_list[RES_MAC]; + struct mac_res *res, *tmp; + + list_for_each_entry_safe(res, tmp, mac_list, list) { + if (res->mac == mac && res->port == (u8) port) { + /* mac found. update ref count */ + ++res->ref_count; + return 0; + } + } if (mlx4_grant_resource(dev, slave, RES_MAC, 1, port)) return -EINVAL; @@ -1674,6 +1744,8 @@ static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port) } res->mac = mac; res->port = (u8) port; + res->smac_index = smac_index; + res->ref_count = 1; list_add_tail(&res->list, &tracker->slave_list[slave].res_list[RES_MAC]); return 0; @@ -1690,9 +1762,11 @@ static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac, list_for_each_entry_safe(res, tmp, mac_list, list) { if (res->mac == mac && res->port == (u8) port) { - list_del(&res->list); - mlx4_release_resource(dev, slave, RES_MAC, 1, port); - kfree(res); + if (!--res->ref_count) { + list_del(&res->list); + mlx4_release_resource(dev, slave, RES_MAC, 1, port); + kfree(res); + } break; } } @@ -1705,10 +1779,13 @@ static void rem_slave_macs(struct mlx4_dev *dev, int slave) struct list_head *mac_list = &tracker->slave_list[slave].res_list[RES_MAC]; struct mac_res *res, *tmp; + int i; list_for_each_entry_safe(res, tmp, mac_list, list) { list_del(&res->list); - __mlx4_unregister_mac(dev, res->port, res->mac); + /* dereference the mac the num times the slave referenced it */ + for (i = 0; i < res->ref_count; i++) + __mlx4_unregister_mac(dev, res->port, res->mac); mlx4_release_resource(dev, slave, RES_MAC, 1, res->port); kfree(res); } @@ -1720,21 +1797,28 @@ static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, int err = -EINVAL; int port; u64 mac; + u8 smac_index; if (op != RES_OP_RESERVE_AND_MAP) return err; port = !in_port ? 
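	/* a zero in_port means the mapped port rides in the low
	 * word of out_param instead
	 */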
get_param_l(out_param) : in_port; + port = mlx4_slave_convert_port( + dev, slave, port); + + if (port < 0) + return -EINVAL; mac = in_param; err = __mlx4_register_mac(dev, port, mac); if (err >= 0) { + smac_index = err; set_param_l(out_param, err); err = 0; } if (!err) { - err = mac_add_to_slave(dev, slave, mac, port); + err = mac_add_to_slave(dev, slave, mac, port, smac_index); if (err) __mlx4_unregister_mac(dev, port, mac); } @@ -1831,6 +1915,11 @@ static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, if (!port || op != RES_OP_RESERVE_AND_MAP) return -EINVAL; + port = mlx4_slave_convert_port( + dev, slave, port); + + if (port < 0) + return -EINVAL; /* upstream kernels had NOP for reg/unreg vlan. Continue this. */ if (!in_port && port > 0 && port <= dev->caps.num_ports) { slave_state[slave].old_vlan_api = true; @@ -2128,6 +2217,11 @@ static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, switch (op) { case RES_OP_RESERVE_AND_MAP: port = !in_port ? get_param_l(out_param) : in_port; + port = mlx4_slave_convert_port( + dev, slave, port); + + if (port < 0) + return -EINVAL; mac_del_from_slave(dev, slave, in_param, port); __mlx4_unregister_mac(dev, port, in_param); break; @@ -2147,6 +2241,11 @@ static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state; int err = 0; + port = mlx4_slave_convert_port( + dev, slave, port); + + if (port < 0) + return -EINVAL; switch (op) { case RES_OP_RESERVE_AND_MAP: if (slave_state[slave].old_vlan_api) @@ -2734,6 +2833,8 @@ static int verify_qp_parameters(struct mlx4_dev *dev, u32 qp_type; struct mlx4_qp_context *qp_ctx; enum mlx4_qp_optpar optpar; + int port; + int num_gids; qp_ctx = inbox->buf + 8; qp_type = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff; @@ -2741,6 +2842,7 @@ static int verify_qp_parameters(struct mlx4_dev *dev, switch (qp_type) { case MLX4_QP_ST_RC: + case MLX4_QP_ST_XRC: case MLX4_QP_ST_UC: switch (transition) { case QP_TRANS_INIT2RTR: @@ -2749,13 +2851,24 @@ static int verify_qp_parameters(struct mlx4_dev *dev, case QP_TRANS_SQD2SQD: case QP_TRANS_SQD2RTS: if (slave != mlx4_master_func_num(dev)) - /* slaves have only gid index 0 */ - if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) - if (qp_ctx->pri_path.mgid_index) + if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) { + port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1; + if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB) + num_gids = mlx4_get_slave_num_gids(dev, slave, port); + else + num_gids = 1; + if (qp_ctx->pri_path.mgid_index >= num_gids) return -EINVAL; - if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) - if (qp_ctx->alt_path.mgid_index) + } + if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) { + port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1; + if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB) + num_gids = mlx4_get_slave_num_gids(dev, slave, port); + else + num_gids = 1; + if (qp_ctx->alt_path.mgid_index >= num_gids) return -EINVAL; + } break; default: break; @@ -3268,6 +3381,58 @@ int mlx4_INIT2INIT_QP_wrapper(struct mlx4_dev *dev, int slave, return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd); } +static int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave, + struct mlx4_qp_context *qpc, + struct mlx4_cmd_mailbox *inbox) +{ + enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *)inbox->buf); + u8 pri_sched_queue; + int port = mlx4_slave_convert_port( + dev, slave, (qpc->pri_path.sched_queue >> 6 & 1) + 1) - 1; + + if (port < 0) + return -EINVAL; + + 
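+	/* re-encode bit 6 of sched_queue with the slave's actual
+	 * physical port before the context is passed on
+	 */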
pri_sched_queue = (qpc->pri_path.sched_queue & ~(1 << 6)) | + ((port & 1) << 6); + + if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH || + mlx4_is_eth(dev, port + 1)) { + qpc->pri_path.sched_queue = pri_sched_queue; + } + + if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) { + port = mlx4_slave_convert_port( + dev, slave, (qpc->alt_path.sched_queue >> 6 & 1) + + 1) - 1; + if (port < 0) + return -EINVAL; + qpc->alt_path.sched_queue = + (qpc->alt_path.sched_queue & ~(1 << 6)) | + (port & 1) << 6; + } + return 0; +} + +static int roce_verify_mac(struct mlx4_dev *dev, int slave, + struct mlx4_qp_context *qpc, + struct mlx4_cmd_mailbox *inbox) +{ + u64 mac; + int port; + u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff; + u8 sched = *(u8 *)(inbox->buf + 64); + u8 smac_ix; + + port = (sched >> 6 & 1) + 1; + if (mlx4_is_eth(dev, port) && (ts != MLX4_QP_ST_MLX)) { + smac_ix = qpc->pri_path.grh_mylmc & 0x7f; + if (mac_find_smac_ix_in_slave(dev, slave, port, smac_ix, &mac)) + return -ENOENT; + } + return 0; +} + int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr, struct mlx4_cmd_mailbox *inbox, @@ -3286,10 +3451,16 @@ int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave, u8 orig_vlan_index = qpc->pri_path.vlan_index; u8 orig_feup = qpc->pri_path.feup; + err = adjust_qp_sched_queue(dev, slave, qpc, inbox); + if (err) + return err; err = verify_qp_parameters(dev, inbox, QP_TRANS_INIT2RTR, slave); if (err) return err; + if (roce_verify_mac(dev, slave, qpc, inbox)) + return -EINVAL; + update_pkey_index(dev, slave, inbox); update_gid(dev, inbox, (u8)slave); adjust_proxy_tun_qkey(dev, vhcr, qpc); @@ -3334,6 +3505,9 @@ int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave, int err; struct mlx4_qp_context *context = inbox->buf + 8; + err = adjust_qp_sched_queue(dev, slave, context, inbox); + if (err) + return err; err = verify_qp_parameters(dev, inbox, QP_TRANS_RTR2RTS, slave); if (err) return err; @@ -3353,6 +3527,9 @@ int mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev *dev, int slave, int err; struct mlx4_qp_context *context = inbox->buf + 8; + err = adjust_qp_sched_queue(dev, slave, context, inbox); + if (err) + return err; err = verify_qp_parameters(dev, inbox, QP_TRANS_RTS2RTS, slave); if (err) return err; @@ -3371,6 +3548,9 @@ int mlx4_SQERR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_cmd_info *cmd) { struct mlx4_qp_context *context = inbox->buf + 8; + int err = adjust_qp_sched_queue(dev, slave, context, inbox); + if (err) + return err; adjust_proxy_tun_qkey(dev, vhcr, context); return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd); } @@ -3384,6 +3564,9 @@ int mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev *dev, int slave, int err; struct mlx4_qp_context *context = inbox->buf + 8; + err = adjust_qp_sched_queue(dev, slave, context, inbox); + if (err) + return err; err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2SQD, slave); if (err) return err; @@ -3403,6 +3586,9 @@ int mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev *dev, int slave, int err; struct mlx4_qp_context *context = inbox->buf + 8; + err = adjust_qp_sched_queue(dev, slave, context, inbox); + if (err) + return err; err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2RTS, slave); if (err) return err; @@ -3506,16 +3692,26 @@ static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp, return err; } -static int qp_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], - int block_loopback, enum mlx4_protocol prot, +static int qp_attach(struct mlx4_dev *dev, int slave, struct 
mlx4_qp *qp, + u8 gid[16], int block_loopback, enum mlx4_protocol prot, enum mlx4_steer_type type, u64 *reg_id) { switch (dev->caps.steering_mode) { - case MLX4_STEERING_MODE_DEVICE_MANAGED: - return mlx4_trans_to_dmfs_attach(dev, qp, gid, gid[5], + case MLX4_STEERING_MODE_DEVICE_MANAGED: { + int port = mlx4_slave_convert_port(dev, slave, gid[5]); + if (port < 0) + return port; + return mlx4_trans_to_dmfs_attach(dev, qp, gid, port, block_loopback, prot, reg_id); + } case MLX4_STEERING_MODE_B0: + if (prot == MLX4_PROT_ETH) { + int port = mlx4_slave_convert_port(dev, slave, gid[5]); + if (port < 0) + return port; + gid[5] = port; + } return mlx4_qp_attach_common(dev, qp, gid, block_loopback, prot, type); default: @@ -3523,9 +3719,9 @@ static int qp_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], } } -static int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], - enum mlx4_protocol prot, enum mlx4_steer_type type, - u64 reg_id) +static int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, + u8 gid[16], enum mlx4_protocol prot, + enum mlx4_steer_type type, u64 reg_id) { switch (dev->caps.steering_mode) { case MLX4_STEERING_MODE_DEVICE_MANAGED: @@ -3562,7 +3758,7 @@ int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave, qp.qpn = qpn; if (attach) { - err = qp_attach(dev, &qp, gid, block_loopback, prot, + err = qp_attach(dev, slave, &qp, gid, block_loopback, prot, type, ®_id); if (err) { pr_err("Fail to attach rule to qp 0x%x\n", qpn); @@ -3698,6 +3894,9 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave, return -EOPNOTSUPP; ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf; + ctrl->port = mlx4_slave_convert_port(dev, slave, ctrl->port); + if (ctrl->port <= 0) + return -EINVAL; qpn = be32_to_cpu(ctrl->qpn) & 0xffffff; err = get_res(dev, slave, qpn, RES_QP, &rqp); if (err) { @@ -3816,16 +4015,6 @@ int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave, return err; } -int mlx4_FLOW_STEERING_IB_UC_QP_RANGE_wrapper(struct mlx4_dev *dev, int slave, - struct mlx4_vhcr *vhcr, - struct mlx4_cmd_mailbox *inbox, - struct mlx4_cmd_mailbox *outbox, - struct mlx4_cmd_info *cmd) -{ - return -EPERM; -} - - static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp) { struct res_gid *rgid; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 23b7e2d35a93..c3eee5f70051 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -116,7 +116,6 @@ static int mlx5_enable_msix(struct mlx5_core_dev *dev) struct mlx5_eq_table *table = &dev->priv.eq_table; int num_eqs = 1 << dev->caps.log_max_eq; int nvec; - int err; int i; nvec = dev->caps.num_ports * num_online_cpus() + MLX5_EQ_VEC_COMP_BASE; @@ -131,17 +130,12 @@ static int mlx5_enable_msix(struct mlx5_core_dev *dev) for (i = 0; i < nvec; i++) table->msix_arr[i].entry = i; -retry: - table->num_comp_vectors = nvec - MLX5_EQ_VEC_COMP_BASE; - err = pci_enable_msix(dev->pdev, table->msix_arr, nvec); - if (err <= 0) { - return err; - } else if (err > 2) { - nvec = err; - goto retry; - } + nvec = pci_enable_msix_range(dev->pdev, table->msix_arr, + MLX5_EQ_VEC_COMP_BASE, nvec); + if (nvec < 0) + return nvec; - mlx5_core_dbg(dev, "received %d MSI vectors out of %d requested\n", err, nvec); + table->num_comp_vectors = nvec - MLX5_EQ_VEC_COMP_BASE; return 0; } @@ -446,6 +440,7 @@ int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev) mlx5_init_cq_table(dev); 
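	/* like the other resource tables, the MR table added below
	 * is a radix tree keyed by the base mkey (see mr.c further
	 * down in this patch)
	 */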
mlx5_init_qp_table(dev); mlx5_init_srq_table(dev); + mlx5_init_mr_table(dev); return 0; @@ -537,7 +532,6 @@ static int __init init(void) return 0; - mlx5_health_cleanup(); err_debug: mlx5_unregister_debugfs(); return err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mr.c b/drivers/net/ethernet/mellanox/mlx5/core/mr.c index 35e514dc7b7d..4cc927649404 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mr.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/mr.c @@ -36,11 +36,24 @@ #include <linux/mlx5/cmd.h> #include "mlx5_core.h" +void mlx5_init_mr_table(struct mlx5_core_dev *dev) +{ + struct mlx5_mr_table *table = &dev->priv.mr_table; + + rwlock_init(&table->lock); + INIT_RADIX_TREE(&table->tree, GFP_ATOMIC); +} + +void mlx5_cleanup_mr_table(struct mlx5_core_dev *dev) +{ +} + int mlx5_core_create_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr, struct mlx5_create_mkey_mbox_in *in, int inlen, mlx5_cmd_cbk_t callback, void *context, struct mlx5_create_mkey_mbox_out *out) { + struct mlx5_mr_table *table = &dev->priv.mr_table; struct mlx5_create_mkey_mbox_out lout; int err; u8 key; @@ -73,14 +86,21 @@ int mlx5_core_create_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr, mlx5_core_dbg(dev, "out 0x%x, key 0x%x, mkey 0x%x\n", be32_to_cpu(lout.mkey), key, mr->key); + /* connect to MR tree */ + write_lock_irq(&table->lock); + err = radix_tree_insert(&table->tree, mlx5_base_mkey(mr->key), mr); + write_unlock_irq(&table->lock); + return err; } EXPORT_SYMBOL(mlx5_core_create_mkey); int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr) { + struct mlx5_mr_table *table = &dev->priv.mr_table; struct mlx5_destroy_mkey_mbox_in in; struct mlx5_destroy_mkey_mbox_out out; + unsigned long flags; int err; memset(&in, 0, sizeof(in)); @@ -95,6 +115,10 @@ int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr) if (out.hdr.status) return mlx5_cmd_status_to_err(&out.hdr); + write_lock_irqsave(&table->lock, flags); + radix_tree_delete(&table->tree, mlx5_base_mkey(mr->key)); + write_unlock_irqrestore(&table->lock, flags); + return err; } EXPORT_SYMBOL(mlx5_core_destroy_mkey); @@ -144,3 +168,64 @@ int mlx5_core_dump_fill_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr, return err; } EXPORT_SYMBOL(mlx5_core_dump_fill_mkey); + +int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn, + int npsvs, u32 *sig_index) +{ + struct mlx5_allocate_psv_in in; + struct mlx5_allocate_psv_out out; + int i, err; + + if (npsvs > MLX5_MAX_PSVS) + return -EINVAL; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_PSV); + in.npsv_pd = cpu_to_be32((npsvs << 28) | pdn); + err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (err) { + mlx5_core_err(dev, "cmd exec failed %d\n", err); + return err; + } + + if (out.hdr.status) { + mlx5_core_err(dev, "create_psv bad status %d\n", out.hdr.status); + return mlx5_cmd_status_to_err(&out.hdr); + } + + for (i = 0; i < npsvs; i++) + sig_index[i] = be32_to_cpu(out.psv_idx[i]) & 0xffffff; + + return err; +} +EXPORT_SYMBOL(mlx5_core_create_psv); + +int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num) +{ + struct mlx5_destroy_psv_in in; + struct mlx5_destroy_psv_out out; + int err; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + + in.psv_number = cpu_to_be32(psv_num); + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_PSV); + err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (err) { + mlx5_core_err(dev, 
"destroy_psv cmd exec failed %d\n", err); + goto out; + } + + if (out.hdr.status) { + mlx5_core_err(dev, "destroy_psv bad status %d\n", out.hdr.status); + err = mlx5_cmd_status_to_err(&out.hdr); + goto out; + } + +out: + return err; +} +EXPORT_SYMBOL(mlx5_core_destroy_psv); diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c index 727b546a9eb8..e0c92e0e5e1d 100644 --- a/drivers/net/ethernet/micrel/ks8851.c +++ b/drivers/net/ethernet/micrel/ks8851.c @@ -23,6 +23,7 @@ #include <linux/crc32.h> #include <linux/mii.h> #include <linux/eeprom_93cx6.h> +#include <linux/regulator/consumer.h> #include <linux/spi/spi.h> @@ -83,6 +84,7 @@ union ks8851_tx_hdr { * @rc_rxqcr: Cached copy of KS_RXQCR. * @eeprom_size: Companion eeprom size in Bytes, 0 if no eeprom * @eeprom: 93CX6 EEPROM state for accessing on-board EEPROM. + * @vdd_reg: Optional regulator supplying the chip * * The @lock ensures that the chip is protected when certain operations are * in progress. When the read or write packet transfer is in progress, most @@ -130,6 +132,7 @@ struct ks8851_net { struct spi_transfer spi_xfer2[2]; struct eeprom_93cx6 eeprom; + struct regulator *vdd_reg; }; static int msg_enable; @@ -1414,6 +1417,21 @@ static int ks8851_probe(struct spi_device *spi) ks->spidev = spi; ks->tx_space = 6144; + ks->vdd_reg = regulator_get_optional(&spi->dev, "vdd"); + if (IS_ERR(ks->vdd_reg)) { + ret = PTR_ERR(ks->vdd_reg); + if (ret == -EPROBE_DEFER) + goto err_reg; + } else { + ret = regulator_enable(ks->vdd_reg); + if (ret) { + dev_err(&spi->dev, "regulator enable fail: %d\n", + ret); + goto err_reg_en; + } + } + + mutex_init(&ks->lock); spin_lock_init(&ks->statelock); @@ -1508,8 +1526,14 @@ static int ks8851_probe(struct spi_device *spi) err_netdev: free_irq(ndev->irq, ks); -err_id: err_irq: +err_id: + if (!IS_ERR(ks->vdd_reg)) + regulator_disable(ks->vdd_reg); +err_reg_en: + if (!IS_ERR(ks->vdd_reg)) + regulator_put(ks->vdd_reg); +err_reg: free_netdev(ndev); return ret; } @@ -1523,6 +1547,10 @@ static int ks8851_remove(struct spi_device *spi) unregister_netdev(priv->netdev); free_irq(spi->irq, priv); + if (!IS_ERR(priv->vdd_reg)) { + regulator_disable(priv->vdd_reg); + regulator_put(priv->vdd_reg); + } free_netdev(priv->netdev); return 0; diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c index ce84dc289c8f..14ac0e2bc09f 100644 --- a/drivers/net/ethernet/micrel/ksz884x.c +++ b/drivers/net/ethernet/micrel/ksz884x.c @@ -4832,7 +4832,7 @@ static inline void copy_old_skb(struct sk_buff *old, struct sk_buff *skb) skb->csum = old->csum; skb_set_network_header(skb, ETH_HLEN); - dev_kfree_skb(old); + dev_consume_skb_any(old); } /** diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c index 68026f7e8ba3..130f6b204efa 100644 --- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c +++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c @@ -2329,16 +2329,14 @@ static int myri10ge_request_irq(struct myri10ge_priv *mgp) status = 0; if (myri10ge_msi) { if (mgp->num_slices > 1) { - status = - pci_enable_msix(pdev, mgp->msix_vectors, - mgp->num_slices); - if (status == 0) { - mgp->msix_enabled = 1; - } else { + status = pci_enable_msix_range(pdev, mgp->msix_vectors, + mgp->num_slices, mgp->num_slices); + if (status < 0) { dev_err(&pdev->dev, "Error %d setting up MSI-X\n", status); return status; } + mgp->msix_enabled = 1; } if (mgp->msix_enabled == 0) { status = pci_enable_msi(pdev); @@ -3895,32 +3893,34 @@ 
static void myri10ge_probe_slices(struct myri10ge_priv *mgp) mgp->msix_vectors = kcalloc(mgp->num_slices, sizeof(*mgp->msix_vectors), GFP_KERNEL); if (mgp->msix_vectors == NULL) - goto disable_msix; + goto no_msix; for (i = 0; i < mgp->num_slices; i++) { mgp->msix_vectors[i].entry = i; } while (mgp->num_slices > 1) { - /* make sure it is a power of two */ - while (!is_power_of_2(mgp->num_slices)) - mgp->num_slices--; + mgp->num_slices = rounddown_pow_of_two(mgp->num_slices); if (mgp->num_slices == 1) - goto disable_msix; - status = pci_enable_msix(pdev, mgp->msix_vectors, - mgp->num_slices); - if (status == 0) { - pci_disable_msix(pdev); + goto no_msix; + status = pci_enable_msix_range(pdev, + mgp->msix_vectors, + mgp->num_slices, + mgp->num_slices); + if (status < 0) + goto no_msix; + + pci_disable_msix(pdev); + + if (status == mgp->num_slices) { if (old_allocated) kfree(old_fw); return; - } - if (status > 0) + } else { mgp->num_slices = status; - else - goto disable_msix; + } } -disable_msix: +no_msix: if (mgp->msix_vectors != NULL) { kfree(mgp->msix_vectors); mgp->msix_vectors = NULL; diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c index 9eeddbd0b2c7..a2844ff322c4 100644 --- a/drivers/net/ethernet/neterion/s2io.c +++ b/drivers/net/ethernet/neterion/s2io.c @@ -2914,6 +2914,9 @@ static int rx_intr_handler(struct ring_info *ring_data, int budget) struct RxD1 *rxdp1; struct RxD3 *rxdp3; + if (budget <= 0) + return napi_pkts; + get_info = ring_data->rx_curr_get_info; get_block = get_info.block_index; memcpy(&put_info, &ring_data->rx_curr_put_info, sizeof(put_info)); @@ -3792,9 +3795,10 @@ static int s2io_enable_msi_x(struct s2io_nic *nic) writeq(rx_mat, &bar0->rx_mat); readq(&bar0->rx_mat); - ret = pci_enable_msix(nic->pdev, nic->entries, nic->num_entries); + ret = pci_enable_msix_range(nic->pdev, nic->entries, + nic->num_entries, nic->num_entries); /* We fail init if error or we get less vectors than min required */ - if (ret) { + if (ret < 0) { DBG_PRINT(ERR_DBG, "Enabling MSI-X failed\n"); kfree(nic->entries); swstats->mem_freed += nic->num_entries * @@ -4045,7 +4049,7 @@ static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev) if (!is_s2io_card_up(sp)) { DBG_PRINT(TX_DBG, "%s: Card going down for reset\n", dev->name); - dev_kfree_skb(skb); + dev_kfree_skb_any(skb); return NETDEV_TX_OK; } @@ -4118,7 +4122,7 @@ static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev) ((put_off+1) == queue_len ? 
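	/* ring full: the next put offset, after wrap-around,
	 * would land on the get offset
	 */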
0 : (put_off+1)) == get_off) { DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n"); s2io_stop_tx_queue(sp, fifo->fifo_no); - dev_kfree_skb(skb); + dev_kfree_skb_any(skb); spin_unlock_irqrestore(&fifo->tx_lock, flags); return NETDEV_TX_OK; } @@ -4240,7 +4244,7 @@ pci_map_failed: swstats->pci_map_fail_cnt++; s2io_stop_tx_queue(sp, fifo->fifo_no); swstats->mem_freed += skb->truesize; - dev_kfree_skb(skb); + dev_kfree_skb_any(skb); spin_unlock_irqrestore(&fifo->tx_lock, flags); return NETDEV_TX_OK; } diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c index e46e8698e630..d107bcbb8543 100644 --- a/drivers/net/ethernet/neterion/vxge/vxge-main.c +++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c @@ -368,6 +368,9 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr, vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d", ring->ndev->name, __func__, __LINE__); + if (ring->budget <= 0) + goto out; + do { prefetch((char *)dtr + L1_CACHE_BYTES); rx_priv = vxge_hw_ring_rxd_private_get(dtr); @@ -525,6 +528,7 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr, if (first_dtr) vxge_hw_ring_rxd_post_post_wmb(ringh, first_dtr); +out: vxge_debug_entryexit(VXGE_TRACE, "%s:%d Exiting...", __func__, __LINE__); @@ -820,7 +824,7 @@ vxge_xmit(struct sk_buff *skb, struct net_device *dev) if (unlikely(skb->len <= 0)) { vxge_debug_tx(VXGE_ERR, "%s: Buffer has no data..", dev->name); - dev_kfree_skb(skb); + dev_kfree_skb_any(skb); return NETDEV_TX_OK; } @@ -829,7 +833,7 @@ vxge_xmit(struct sk_buff *skb, struct net_device *dev) if (unlikely(!is_vxge_card_up(vdev))) { vxge_debug_tx(VXGE_ERR, "%s: vdev not initialized", dev->name); - dev_kfree_skb(skb); + dev_kfree_skb_any(skb); return NETDEV_TX_OK; } @@ -839,7 +843,7 @@ vxge_xmit(struct sk_buff *skb, struct net_device *dev) vxge_debug_tx(VXGE_ERR, "%s: Failed to store the mac address", dev->name); - dev_kfree_skb(skb); + dev_kfree_skb_any(skb); return NETDEV_TX_OK; } } @@ -986,7 +990,7 @@ _exit1: vxge_hw_fifo_txdl_free(fifo_hw, dtr); _exit0: netif_tx_stop_queue(fifo->txq); - dev_kfree_skb(skb); + dev_kfree_skb_any(skb); return NETDEV_TX_OK; } @@ -2349,12 +2353,18 @@ start: vdev->vxge_entries[j].entry = VXGE_ALARM_MSIX_ID; vdev->vxge_entries[j].in_use = 0; - ret = pci_enable_msix(vdev->pdev, vdev->entries, vdev->intr_cnt); - if (ret > 0) { + ret = pci_enable_msix_range(vdev->pdev, + vdev->entries, 3, vdev->intr_cnt); + if (ret < 0) { + ret = -ENODEV; + goto enable_msix_failed; + } else if (ret < vdev->intr_cnt) { + pci_disable_msix(vdev->pdev); + vxge_debug_init(VXGE_ERR, "%s: MSI-X enable failed for %d vectors, ret: %d", VXGE_DRIVER_NAME, vdev->intr_cnt, ret); - if ((max_config_vpath != VXGE_USE_DEFAULT) || (ret < 3)) { + if (max_config_vpath != VXGE_USE_DEFAULT) { ret = -ENODEV; goto enable_msix_failed; } @@ -2368,9 +2378,6 @@ start: vxge_close_vpaths(vdev, temp); vdev->no_of_vpath = temp; goto start; - } else if (ret < 0) { - ret = -ENODEV; - goto enable_msix_failed; } return 0; @@ -3131,12 +3138,12 @@ vxge_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats) u64 packets, bytes, multicast; do { - start = u64_stats_fetch_begin_bh(&rxstats->syncp); + start = u64_stats_fetch_begin_irq(&rxstats->syncp); packets = rxstats->rx_frms; multicast = rxstats->rx_mcast; bytes = rxstats->rx_bytes; - } while (u64_stats_fetch_retry_bh(&rxstats->syncp, start)); + } while (u64_stats_fetch_retry_irq(&rxstats->syncp, start)); net_stats->rx_packets += packets; net_stats->rx_bytes += bytes; @@ -3146,11 
+3153,11 @@ vxge_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats) net_stats->rx_dropped += rxstats->rx_dropped; do { - start = u64_stats_fetch_begin_bh(&txstats->syncp); + start = u64_stats_fetch_begin_irq(&txstats->syncp); packets = txstats->tx_frms; bytes = txstats->tx_bytes; - } while (u64_stats_fetch_retry_bh(&txstats->syncp, start)); + } while (u64_stats_fetch_retry_irq(&txstats->syncp, start)); net_stats->tx_packets += packets; net_stats->tx_bytes += bytes; diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c index 70cf97fe67f2..fddb464aeab3 100644 --- a/drivers/net/ethernet/nvidia/forcedeth.c +++ b/drivers/net/ethernet/nvidia/forcedeth.c @@ -1753,19 +1753,19 @@ nv_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *storage) /* software stats */ do { - syncp_start = u64_stats_fetch_begin_bh(&np->swstats_rx_syncp); + syncp_start = u64_stats_fetch_begin_irq(&np->swstats_rx_syncp); storage->rx_packets = np->stat_rx_packets; storage->rx_bytes = np->stat_rx_bytes; storage->rx_dropped = np->stat_rx_dropped; storage->rx_missed_errors = np->stat_rx_missed_errors; - } while (u64_stats_fetch_retry_bh(&np->swstats_rx_syncp, syncp_start)); + } while (u64_stats_fetch_retry_irq(&np->swstats_rx_syncp, syncp_start)); do { - syncp_start = u64_stats_fetch_begin_bh(&np->swstats_tx_syncp); + syncp_start = u64_stats_fetch_begin_irq(&np->swstats_tx_syncp); storage->tx_packets = np->stat_tx_packets; storage->tx_bytes = np->stat_tx_bytes; storage->tx_dropped = np->stat_tx_dropped; - } while (u64_stats_fetch_retry_bh(&np->swstats_tx_syncp, syncp_start)); + } while (u64_stats_fetch_retry_irq(&np->swstats_tx_syncp, syncp_start)); /* If the nic supports hw counters then retrieve latest values */ if (np->driver_data & DEV_HAS_STATISTICS_V123) { @@ -2231,7 +2231,7 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev) if (pci_dma_mapping_error(np->pci_dev, np->put_tx_ctx->dma)) { /* on DMA mapping error - drop the packet */ - kfree_skb(skb); + dev_kfree_skb_any(skb); u64_stats_update_begin(&np->swstats_tx_syncp); np->stat_tx_dropped++; u64_stats_update_end(&np->swstats_tx_syncp); @@ -2277,7 +2277,7 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev) if (unlikely(tmp_tx_ctx++ == np->last_tx_ctx)) tmp_tx_ctx = np->first_tx_ctx; } while (tmp_tx_ctx != np->put_tx_ctx); - kfree_skb(skb); + dev_kfree_skb_any(skb); np->put_tx_ctx = start_tx_ctx; u64_stats_update_begin(&np->swstats_tx_syncp); np->stat_tx_dropped++; @@ -2380,7 +2380,7 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb, if (pci_dma_mapping_error(np->pci_dev, np->put_tx_ctx->dma)) { /* on DMA mapping error - drop the packet */ - kfree_skb(skb); + dev_kfree_skb_any(skb); u64_stats_update_begin(&np->swstats_tx_syncp); np->stat_tx_dropped++; u64_stats_update_end(&np->swstats_tx_syncp); @@ -2427,7 +2427,7 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb, if (unlikely(tmp_tx_ctx++ == np->last_tx_ctx)) tmp_tx_ctx = np->first_tx_ctx; } while (tmp_tx_ctx != np->put_tx_ctx); - kfree_skb(skb); + dev_kfree_skb_any(skb); np->put_tx_ctx = start_tx_ctx; u64_stats_update_begin(&np->swstats_tx_syncp); np->stat_tx_dropped++; @@ -3930,7 +3930,7 @@ static int nv_request_irq(struct net_device *dev, int intr_test) { struct fe_priv *np = get_nvpriv(dev); u8 __iomem *base = get_hwbase(dev); - int ret = 1; + int ret; int i; irqreturn_t (*handler)(int foo, void *data); @@ -3946,14 +3946,18 @@ static int 
nv_request_irq(struct net_device *dev, int intr_test) if (np->msi_flags & NV_MSI_X_CAPABLE) { for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) np->msi_x_entry[i].entry = i; - ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK)); - if (ret == 0) { + ret = pci_enable_msix_range(np->pci_dev, + np->msi_x_entry, + np->msi_flags & NV_MSI_X_VECTORS_MASK, + np->msi_flags & NV_MSI_X_VECTORS_MASK); + if (ret > 0) { np->msi_flags |= NV_MSI_X_ENABLED; if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT && !intr_test) { /* Request irq for rx handling */ sprintf(np->name_rx, "%s-rx", dev->name); - if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, - nv_nic_irq_rx, IRQF_SHARED, np->name_rx, dev) != 0) { + ret = request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, + nv_nic_irq_rx, IRQF_SHARED, np->name_rx, dev); + if (ret) { netdev_info(dev, "request_irq failed for rx %d\n", ret); @@ -3963,8 +3967,9 @@ static int nv_request_irq(struct net_device *dev, int intr_test) } /* Request irq for tx handling */ sprintf(np->name_tx, "%s-tx", dev->name); - if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, - nv_nic_irq_tx, IRQF_SHARED, np->name_tx, dev) != 0) { + ret = request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, + nv_nic_irq_tx, IRQF_SHARED, np->name_tx, dev); + if (ret) { netdev_info(dev, "request_irq failed for tx %d\n", ret); @@ -3974,8 +3979,9 @@ static int nv_request_irq(struct net_device *dev, int intr_test) } /* Request irq for link and timer handling */ sprintf(np->name_other, "%s-other", dev->name); - if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector, - nv_nic_irq_other, IRQF_SHARED, np->name_other, dev) != 0) { + ret = request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector, + nv_nic_irq_other, IRQF_SHARED, np->name_other, dev); + if (ret) { netdev_info(dev, "request_irq failed for link %d\n", ret); @@ -3991,7 +3997,9 @@ static int nv_request_irq(struct net_device *dev, int intr_test) set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER); } else { /* Request irq for all interrupts */ - if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, handler, IRQF_SHARED, dev->name, dev) != 0) { + ret = request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, + handler, IRQF_SHARED, dev->name, dev); + if (ret) { netdev_info(dev, "request_irq failed %d\n", ret); @@ -4005,13 +4013,15 @@ static int nv_request_irq(struct net_device *dev, int intr_test) writel(0, base + NvRegMSIXMap1); } netdev_info(dev, "MSI-X enabled\n"); + return 0; } } - if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) { + if (np->msi_flags & NV_MSI_CAPABLE) { ret = pci_enable_msi(np->pci_dev); if (ret == 0) { np->msi_flags |= NV_MSI_ENABLED; - if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0) { + ret = request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev); + if (ret) { netdev_info(dev, "request_irq failed %d\n", ret); pci_disable_msi(np->pci_dev); @@ -4025,13 +4035,12 @@ static int nv_request_irq(struct net_device *dev, int intr_test) /* enable msi vector 0 */ writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask); netdev_info(dev, "MSI enabled\n"); + return 0; } } - if (ret != 0) { - if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0) - goto out_err; - } + if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0) + goto out_err; return 0; out_free_tx: diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 
b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c index 464e91058c81..73e66838cfef 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c @@ -120,10 +120,6 @@ static void pch_gbe_mdio_write(struct net_device *netdev, int addr, int reg, int data); static void pch_gbe_set_multi(struct net_device *netdev); -static struct sock_filter ptp_filter[] = { - PTP_FILTER -}; - static int pch_ptp_match(struct sk_buff *skb, u16 uid_hi, u32 uid_lo, u16 seqid) { u8 *data = skb->data; @@ -131,7 +127,7 @@ static int pch_ptp_match(struct sk_buff *skb, u16 uid_hi, u32 uid_lo, u16 seqid) u16 *hi, *id; u32 lo; - if (sk_run_filter(skb, ptp_filter) == PTP_CLASS_NONE) + if (ptp_classify_raw(skb) == PTP_CLASS_NONE) return 0; offset = ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN; @@ -2635,11 +2631,6 @@ static int pch_gbe_probe(struct pci_dev *pdev, adapter->ptp_pdev = pci_get_bus_and_slot(adapter->pdev->bus->number, PCI_DEVFN(12, 4)); - if (ptp_filter_init(ptp_filter, ARRAY_SIZE(ptp_filter))) { - dev_err(&pdev->dev, "Bad ptp filter\n"); - ret = -EINVAL; - goto err_free_netdev; - } netdev->netdev_ops = &pch_gbe_netdev_ops; netdev->watchdog_timeo = PCH_GBE_WATCHDOG_PERIOD; diff --git a/drivers/net/ethernet/qlogic/Kconfig b/drivers/net/ethernet/qlogic/Kconfig index f59e6be4a66e..c14bd3116e45 100644 --- a/drivers/net/ethernet/qlogic/Kconfig +++ b/drivers/net/ethernet/qlogic/Kconfig @@ -56,6 +56,16 @@ config QLCNIC_DCB mode of DCB is supported. PG and PFC values are related only to Tx. +config QLCNIC_VXLAN + bool "Virtual eXtensible Local Area Network (VXLAN) offload support" + default n + depends on QLCNIC && VXLAN && !(QLCNIC=y && VXLAN=m) + ---help--- + This enables hardware offload support for VXLAN protocol over QLogic's + 84XX series adapters. + Say Y here if you want to enable hardware offload support for + Virtual eXtensible Local Area Network (VXLAN) in the driver. 
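+	  Note that QLCNIC=y combined with VXLAN=m is excluded by the
+	  dependency above, since a built-in driver cannot call into a
+	  modular VXLAN core.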
+ config QLGE tristate "QLogic QLGE 10Gb Ethernet Driver Support" depends on PCI diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c index 70849dea32b1..f09c35d669b3 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c @@ -643,8 +643,9 @@ static int netxen_setup_msi_interrupts(struct netxen_adapter *adapter, if (adapter->msix_supported) { netxen_init_msix_entries(adapter, num_msix); - err = pci_enable_msix(pdev, adapter->msix_entries, num_msix); - if (err == 0) { + err = pci_enable_msix_range(pdev, adapter->msix_entries, + num_msix, num_msix); + if (err > 0) { adapter->flags |= NETXEN_NIC_MSIX_ENABLED; netxen_set_msix_bit(pdev, 1); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h index f19f81cde134..f31bb5e9d8a9 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h @@ -38,8 +38,8 @@ #define _QLCNIC_LINUX_MAJOR 5 #define _QLCNIC_LINUX_MINOR 3 -#define _QLCNIC_LINUX_SUBVERSION 55 -#define QLCNIC_LINUX_VERSIONID "5.3.55" +#define _QLCNIC_LINUX_SUBVERSION 57 +#define QLCNIC_LINUX_VERSIONID "5.3.57" #define QLCNIC_DRV_IDC_VER 0x01 #define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\ (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION)) @@ -169,11 +169,20 @@ struct cmd_desc_type0 { __le64 addr_buffer2; - __le16 reference_handle; + __le16 encap_descr; /* 15:10 offset of outer L3 header, + * 9:6 number of 32bit words in outer L3 header, + * 5 offload outer L4 checksum, + * 4 offload outer L3 checksum, + * 3 Inner L4 type, TCP=0, UDP=1, + * 2 Inner L3 type, IPv4=0, IPv6=1, + * 1 Outer L3 type,IPv4=0, IPv6=1, + * 0 type of encapsulation, GRE=0, VXLAN=1 + */ __le16 mss; u8 port_ctxid; /* 7:4 ctxid 3:0 port */ - u8 total_hdr_length; /* LSO only : MAC+IP+TCP Hdr size */ - __le16 conn_id; /* IPSec offoad only */ + u8 hdr_length; /* LSO only : MAC+IP+TCP Hdr size */ + u8 outer_hdr_length; /* Encapsulation only */ + u8 rsvd1; __le64 addr_buffer3; __le64 addr_buffer1; @@ -183,7 +192,9 @@ struct cmd_desc_type0 { __le64 addr_buffer4; u8 eth_addr[ETH_ALEN]; - __le16 vlan_TCI; + __le16 vlan_TCI; /* In case of encapsulation, + * this is for outer VLAN + */ } __attribute__ ((aligned(64))); @@ -394,7 +405,7 @@ struct qlcnic_nic_intr_coalesce { u32 timer_out; }; -struct qlcnic_dump_template_hdr { +struct qlcnic_83xx_dump_template_hdr { u32 type; u32 offset; u32 size; @@ -411,15 +422,42 @@ struct qlcnic_dump_template_hdr { u32 rsvd[0]; }; +struct qlcnic_82xx_dump_template_hdr { + u32 type; + u32 offset; + u32 size; + u32 cap_mask; + u32 num_entries; + u32 version; + u32 timestamp; + u32 checksum; + u32 drv_cap_mask; + u32 sys_info[3]; + u32 saved_state[16]; + u32 cap_sizes[8]; + u32 rsvd[7]; + u32 capabilities; + u32 rsvd1[0]; +}; + struct qlcnic_fw_dump { u8 clr; /* flag to indicate if dump is cleared */ bool enable; /* enable/disable dump */ u32 size; /* total size of the dump */ + u32 cap_mask; /* Current capture mask */ void *data; /* dump data area */ - struct qlcnic_dump_template_hdr *tmpl_hdr; + void *tmpl_hdr; dma_addr_t phys_addr; void *dma_buffer; bool use_pex_dma; + /* Read only elements which are common between 82xx and 83xx + * template header. 
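+	 * (tmpl_hdr_size, version, num_entries and offset below).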
Update these values immediately after we read + * template header from Firmware + */ + u32 tmpl_hdr_size; + u32 version; + u32 num_entries; + u32 offset; }; /* @@ -497,6 +535,7 @@ struct qlcnic_hardware_context { u8 extend_lb_time; u8 phys_port_id[ETH_ALEN]; u8 lb_mode; + u16 vxlan_port; }; struct qlcnic_adapter_stats { @@ -511,6 +550,9 @@ struct qlcnic_adapter_stats { u64 txbytes; u64 lrobytes; u64 lso_frames; + u64 encap_lso_frames; + u64 encap_tx_csummed; + u64 encap_rx_csummed; u64 xmit_on; u64 xmit_off; u64 skb_alloc_failure; @@ -872,6 +914,10 @@ struct qlcnic_mac_vlan_list { #define QLCNIC_FW_CAPABILITY_2_BEACON BIT_7 #define QLCNIC_FW_CAPABILITY_2_PER_PORT_ESWITCH_CFG BIT_9 +#define QLCNIC_83XX_FW_CAPAB_ENCAP_RX_OFFLOAD BIT_0 +#define QLCNIC_83XX_FW_CAPAB_ENCAP_TX_OFFLOAD BIT_1 +#define QLCNIC_83XX_FW_CAPAB_ENCAP_CKO_OFFLOAD BIT_4 + /* module types */ #define LINKEVENT_MODULE_NOT_PRESENT 1 #define LINKEVENT_MODULE_OPTICAL_UNKNOWN 2 @@ -966,6 +1012,11 @@ struct qlcnic_ipaddr { #define QLCNIC_HAS_PHYS_PORT_ID 0x40000 #define QLCNIC_TSS_RSS 0x80000 +#ifdef CONFIG_QLCNIC_VXLAN +#define QLCNIC_ADD_VXLAN_PORT 0x100000 +#define QLCNIC_DEL_VXLAN_PORT 0x200000 +#endif + #define QLCNIC_IS_MSI_FAMILY(adapter) \ ((adapter)->flags & (QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED)) #define QLCNIC_IS_TSO_CAPABLE(adapter) \ @@ -1769,10 +1820,28 @@ struct qlcnic_hardware_ops { struct qlcnic_host_tx_ring *); void (*disable_tx_intr) (struct qlcnic_adapter *, struct qlcnic_host_tx_ring *); + u32 (*get_saved_state)(void *, u32); + void (*set_saved_state)(void *, u32, u32); + void (*cache_tmpl_hdr_values)(struct qlcnic_fw_dump *); + u32 (*get_cap_size)(void *, int); + void (*set_sys_info)(void *, int, u32); + void (*store_cap_mask)(void *, u32); }; extern struct qlcnic_nic_template qlcnic_vf_ops; +static inline bool qlcnic_encap_tx_offload(struct qlcnic_adapter *adapter) +{ + return adapter->ahw->extra_capability[0] & + QLCNIC_83XX_FW_CAPAB_ENCAP_TX_OFFLOAD; +} + +static inline bool qlcnic_encap_rx_offload(struct qlcnic_adapter *adapter) +{ + return adapter->ahw->extra_capability[0] & + QLCNIC_83XX_FW_CAPAB_ENCAP_RX_OFFLOAD; +} + static inline int qlcnic_start_firmware(struct qlcnic_adapter *adapter) { return adapter->nic_ops->start_firmware(adapter); @@ -2007,6 +2076,42 @@ static inline void qlcnic_read_phys_port_id(struct qlcnic_adapter *adapter) adapter->ahw->hw_ops->read_phys_port_id(adapter); } +static inline u32 qlcnic_get_saved_state(struct qlcnic_adapter *adapter, + void *t_hdr, u32 index) +{ + return adapter->ahw->hw_ops->get_saved_state(t_hdr, index); +} + +static inline void qlcnic_set_saved_state(struct qlcnic_adapter *adapter, + void *t_hdr, u32 index, u32 value) +{ + adapter->ahw->hw_ops->set_saved_state(t_hdr, index, value); +} + +static inline void qlcnic_cache_tmpl_hdr_values(struct qlcnic_adapter *adapter, + struct qlcnic_fw_dump *fw_dump) +{ + adapter->ahw->hw_ops->cache_tmpl_hdr_values(fw_dump); +} + +static inline u32 qlcnic_get_cap_size(struct qlcnic_adapter *adapter, + void *tmpl_hdr, int index) +{ + return adapter->ahw->hw_ops->get_cap_size(tmpl_hdr, index); +} + +static inline void qlcnic_set_sys_info(struct qlcnic_adapter *adapter, + void *tmpl_hdr, int idx, u32 value) +{ + adapter->ahw->hw_ops->set_sys_info(tmpl_hdr, idx, value); +} + +static inline void qlcnic_store_cap_mask(struct qlcnic_adapter *adapter, + void *tmpl_hdr, u32 mask) +{ + adapter->ahw->hw_ops->store_cap_mask(tmpl_hdr, mask); +} + static inline void qlcnic_dev_request_reset(struct qlcnic_adapter *adapter, u32 key) { 
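The template-header accessors wired up above let the common minidump code treat tmpl_hdr as an opaque pointer, leaving the 82xx/83xx layout differences to the hw_ops table. A minimal sketch of what the 82xx callbacks (declared in qlcnic_hw.h later in this patch) could look like under that assumption; the bodies are inferred from the two template-header structs above, not quoted from the patch:

static u32 qlcnic_82xx_get_saved_state(void *t_hdr, u32 index)
{
	struct qlcnic_82xx_dump_template_hdr *hdr = t_hdr;

	/* saved_state[] is the 16-entry scratch area in the 82xx header */
	return hdr->saved_state[index];
}

static void qlcnic_82xx_set_saved_state(void *t_hdr, u32 index, u32 value)
{
	struct qlcnic_82xx_dump_template_hdr *hdr = t_hdr;

	hdr->saved_state[index] = value;
}

static u32 qlcnic_82xx_get_cap_size(void *t_hdr, int index)
{
	struct qlcnic_82xx_dump_template_hdr *hdr = t_hdr;

	return hdr->cap_sizes[index];
}

static void qlcnic_82xx_store_cap_mask(void *t_hdr, u32 mask)
{
	struct qlcnic_82xx_dump_template_hdr *hdr = t_hdr;

	hdr->drv_cap_mask = mask;
}

The 83xx variants would index struct qlcnic_83xx_dump_template_hdr the same way; taking a bare void * is what lets one qlcnic_fw_dump code path drive both generations.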
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c index 27c4f131863b..b7cffb46a75d 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c @@ -77,7 +77,7 @@ static const struct qlcnic_mailbox_metadata qlcnic_83xx_mbx_tbl[] = { {QLCNIC_CMD_GET_PORT_CONFIG, 2, 2}, {QLCNIC_CMD_GET_LINK_STATUS, 2, 4}, {QLCNIC_CMD_IDC_ACK, 5, 1}, - {QLCNIC_CMD_INIT_NIC_FUNC, 2, 1}, + {QLCNIC_CMD_INIT_NIC_FUNC, 3, 1}, {QLCNIC_CMD_STOP_NIC_FUNC, 2, 1}, {QLCNIC_CMD_SET_LED_CONFIG, 5, 1}, {QLCNIC_CMD_GET_LED_CONFIG, 1, 5}, @@ -87,6 +87,7 @@ static const struct qlcnic_mailbox_metadata qlcnic_83xx_mbx_tbl[] = { {QLCNIC_CMD_BC_EVENT_SETUP, 2, 1}, {QLCNIC_CMD_DCB_QUERY_CAP, 1, 2}, {QLCNIC_CMD_DCB_QUERY_PARAM, 1, 50}, + {QLCNIC_CMD_SET_INGRESS_ENCAP, 2, 1}, }; const u32 qlcnic_83xx_ext_reg_tbl[] = { @@ -203,7 +204,12 @@ static struct qlcnic_hardware_ops qlcnic_83xx_hw_ops = { .disable_sds_intr = qlcnic_83xx_disable_sds_intr, .enable_tx_intr = qlcnic_83xx_enable_tx_intr, .disable_tx_intr = qlcnic_83xx_disable_tx_intr, - + .get_saved_state = qlcnic_83xx_get_saved_state, + .set_saved_state = qlcnic_83xx_set_saved_state, + .cache_tmpl_hdr_values = qlcnic_83xx_cache_tmpl_hdr_values, + .get_cap_size = qlcnic_83xx_get_cap_size, + .set_sys_info = qlcnic_83xx_set_sys_info, + .store_cap_mask = qlcnic_83xx_store_cap_mask, }; static struct qlcnic_nic_template qlcnic_83xx_ops = { diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h index f92485ca21d1..88d809c35633 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h @@ -308,6 +308,8 @@ struct qlc_83xx_reset { #define QLC_83XX_IDC_FLASH_PARAM_ADDR 0x3e8020 struct qlcnic_adapter; +struct qlcnic_fw_dump; + struct qlc_83xx_idc { int (*state_entry) (struct qlcnic_adapter *); u64 sec_counter; @@ -526,8 +528,9 @@ enum qlc_83xx_ext_regs { }; /* Initialize/Stop NIC command bit definitions */ -#define QLC_REGISTER_DCB_AEN BIT_1 #define QLC_REGISTER_LB_IDC BIT_0 +#define QLC_REGISTER_DCB_AEN BIT_1 +#define QLC_83XX_MULTI_TENANCY_INFO BIT_29 #define QLC_INIT_FW_RESOURCES BIT_31 /* 83xx funcitons */ @@ -650,4 +653,10 @@ int qlcnic_83xx_check_vnic_state(struct qlcnic_adapter *); void qlcnic_83xx_aer_stop_poll_work(struct qlcnic_adapter *); int qlcnic_83xx_aer_reset(struct qlcnic_adapter *); void qlcnic_83xx_aer_start_poll_work(struct qlcnic_adapter *); +u32 qlcnic_83xx_get_saved_state(void *, u32); +void qlcnic_83xx_set_saved_state(void *, u32, u32); +void qlcnic_83xx_cache_tmpl_hdr_values(struct qlcnic_fw_dump *); +u32 qlcnic_83xx_get_cap_size(void *, int); +void qlcnic_83xx_set_sys_info(void *, int, u32); +void qlcnic_83xx_store_cap_mask(void *, u32); #endif diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c index 90a2dda351ec..b48737dcd3c5 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c @@ -1020,10 +1020,99 @@ static int qlcnic_83xx_idc_check_state_validity(struct qlcnic_adapter *adapter, return 0; } +#ifdef CONFIG_QLCNIC_VXLAN +#define QLC_83XX_ENCAP_TYPE_VXLAN BIT_1 +#define QLC_83XX_MATCH_ENCAP_ID BIT_2 +#define QLC_83XX_SET_VXLAN_UDP_DPORT BIT_3 +#define QLC_83XX_VXLAN_UDP_DPORT(PORT) ((PORT & 0xffff) << 16) + +#define QLCNIC_ENABLE_INGRESS_ENCAP_PARSING 1 +#define 
QLCNIC_DISABLE_INGRESS_ENCAP_PARSING 0 + +static int qlcnic_set_vxlan_port(struct qlcnic_adapter *adapter) +{ + u16 port = adapter->ahw->vxlan_port; + struct qlcnic_cmd_args cmd; + int ret = 0; + + memset(&cmd, 0, sizeof(cmd)); + + ret = qlcnic_alloc_mbx_args(&cmd, adapter, + QLCNIC_CMD_INIT_NIC_FUNC); + if (ret) + return ret; + + cmd.req.arg[1] = QLC_83XX_MULTI_TENANCY_INFO; + cmd.req.arg[2] = QLC_83XX_ENCAP_TYPE_VXLAN | + QLC_83XX_SET_VXLAN_UDP_DPORT | + QLC_83XX_VXLAN_UDP_DPORT(port); + + ret = qlcnic_issue_cmd(adapter, &cmd); + if (ret) + netdev_err(adapter->netdev, + "Failed to set VXLAN port %d in adapter\n", + port); + + qlcnic_free_mbx_args(&cmd); + + return ret; +} + +static int qlcnic_set_vxlan_parsing(struct qlcnic_adapter *adapter, + bool state) +{ + u16 vxlan_port = adapter->ahw->vxlan_port; + struct qlcnic_cmd_args cmd; + int ret = 0; + + memset(&cmd, 0, sizeof(cmd)); + + ret = qlcnic_alloc_mbx_args(&cmd, adapter, + QLCNIC_CMD_SET_INGRESS_ENCAP); + if (ret) + return ret; + + cmd.req.arg[1] = state ? QLCNIC_ENABLE_INGRESS_ENCAP_PARSING : + QLCNIC_DISABLE_INGRESS_ENCAP_PARSING; + + ret = qlcnic_issue_cmd(adapter, &cmd); + if (ret) + netdev_err(adapter->netdev, + "Failed to %s VXLAN parsing for port %d\n", + state ? "enable" : "disable", vxlan_port); + else + netdev_info(adapter->netdev, + "%s VXLAN parsing for port %d\n", + state ? "Enabled" : "Disabled", vxlan_port); + + qlcnic_free_mbx_args(&cmd); + + return ret; +} +#endif + static void qlcnic_83xx_periodic_tasks(struct qlcnic_adapter *adapter) { if (adapter->fhash.fnum) qlcnic_prune_lb_filters(adapter); + +#ifdef CONFIG_QLCNIC_VXLAN + if (adapter->flags & QLCNIC_ADD_VXLAN_PORT) { + if (qlcnic_set_vxlan_port(adapter)) + return; + + if (qlcnic_set_vxlan_parsing(adapter, true)) + return; + + adapter->flags &= ~QLCNIC_ADD_VXLAN_PORT; + } else if (adapter->flags & QLCNIC_DEL_VXLAN_PORT) { + if (qlcnic_set_vxlan_parsing(adapter, false)) + return; + + adapter->ahw->vxlan_port = 0; + adapter->flags &= ~QLCNIC_DEL_VXLAN_PORT; + } +#endif } /** @@ -1301,7 +1390,7 @@ static int qlcnic_83xx_copy_fw_file(struct qlcnic_adapter *adapter) addr = (u64)dest; ret = qlcnic_83xx_ms_mem_write128(adapter, addr, - (u32 *)p_cache, size / 16); + p_cache, size / 16); if (ret) { dev_err(&adapter->pdev->dev, "MS memory write failed\n"); release_firmware(fw); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c index acee1a5d80c6..5bacf5210aed 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c @@ -47,6 +47,12 @@ static const struct qlcnic_stats qlcnic_gstrings_stats[] = { {"lro_pkts", QLC_SIZEOF(stats.lro_pkts), QLC_OFF(stats.lro_pkts)}, {"lrobytes", QLC_SIZEOF(stats.lrobytes), QLC_OFF(stats.lrobytes)}, {"lso_frames", QLC_SIZEOF(stats.lso_frames), QLC_OFF(stats.lso_frames)}, + {"encap_lso_frames", QLC_SIZEOF(stats.encap_lso_frames), + QLC_OFF(stats.encap_lso_frames)}, + {"encap_tx_csummed", QLC_SIZEOF(stats.encap_tx_csummed), + QLC_OFF(stats.encap_tx_csummed)}, + {"encap_rx_csummed", QLC_SIZEOF(stats.encap_rx_csummed), + QLC_OFF(stats.encap_rx_csummed)}, {"skb_alloc_failure", QLC_SIZEOF(stats.skb_alloc_failure), QLC_OFF(stats.skb_alloc_failure)}, {"mac_filter_limit_overrun", QLC_SIZEOF(stats.mac_filter_limit_overrun), @@ -1639,14 +1645,14 @@ qlcnic_get_dump_flag(struct net_device *netdev, struct ethtool_dump *dump) } if (fw_dump->clr) - dump->len = fw_dump->tmpl_hdr->size + fw_dump->size; + dump->len = 
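		/* the ethtool length covers the template header, which is
		 * copied out ahead of the captured data
		 */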
fw_dump->tmpl_hdr_size + fw_dump->size; else dump->len = 0; if (!qlcnic_check_fw_dump_state(adapter)) dump->flag = ETH_FW_DUMP_DISABLE; else - dump->flag = fw_dump->tmpl_hdr->drv_cap_mask; + dump->flag = fw_dump->cap_mask; dump->version = adapter->fw_version; return 0; @@ -1671,9 +1677,10 @@ qlcnic_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump, netdev_info(netdev, "Dump not available\n"); return -EINVAL; } + /* Copy template header first */ - copy_sz = fw_dump->tmpl_hdr->size; - hdr_ptr = (u32 *) fw_dump->tmpl_hdr; + copy_sz = fw_dump->tmpl_hdr_size; + hdr_ptr = (u32 *)fw_dump->tmpl_hdr; data = buffer; for (i = 0; i < copy_sz/sizeof(u32); i++) *data++ = cpu_to_le32(*hdr_ptr++); @@ -1681,7 +1688,7 @@ qlcnic_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump, /* Copy captured dump data */ memcpy(buffer + copy_sz, fw_dump->data, fw_dump->size); dump->len = copy_sz + fw_dump->size; - dump->flag = fw_dump->tmpl_hdr->drv_cap_mask; + dump->flag = fw_dump->cap_mask; /* Free dump area once data has been captured */ vfree(fw_dump->data); @@ -1703,7 +1710,11 @@ static int qlcnic_set_dump_mask(struct qlcnic_adapter *adapter, u32 mask) return -EOPNOTSUPP; } - fw_dump->tmpl_hdr->drv_cap_mask = mask; + fw_dump->cap_mask = mask; + + /* Store new capture mask in template header as well*/ + qlcnic_store_cap_mask(adapter, fw_dump->tmpl_hdr, mask); + netdev_info(netdev, "Driver mask changed to: 0x%x\n", mask); return 0; } diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c index 03d18a0be6ce..9f3adf4e70b5 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c @@ -317,9 +317,7 @@ static void qlcnic_write_window_reg(u32 addr, void __iomem *bar0, u32 data) int qlcnic_pcie_sem_lock(struct qlcnic_adapter *adapter, int sem, u32 id_reg) { - int timeout = 0; - int err = 0; - u32 done = 0; + int timeout = 0, err = 0, done = 0; while (!done) { done = QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_LOCK(sem)), @@ -327,10 +325,20 @@ qlcnic_pcie_sem_lock(struct qlcnic_adapter *adapter, int sem, u32 id_reg) if (done == 1) break; if (++timeout >= QLCNIC_PCIE_SEM_TIMEOUT) { - dev_err(&adapter->pdev->dev, - "Failed to acquire sem=%d lock; holdby=%d\n", - sem, - id_reg ? 
QLCRD32(adapter, id_reg, &err) : -1); + if (id_reg) { + done = QLCRD32(adapter, id_reg, &err); + if (done != -1) + dev_err(&adapter->pdev->dev, + "Failed to acquire sem=%d lock held by=%d\n", + sem, done); + else + dev_err(&adapter->pdev->dev, + "Failed to acquire sem=%d lock", + sem); + } else { + dev_err(&adapter->pdev->dev, + "Failed to acquire sem=%d lock", sem); + } return -EIO; } msleep(1); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h index 63d75617d445..cbe2399c30a0 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h @@ -98,6 +98,7 @@ enum qlcnic_regs { #define QLCNIC_CMD_GET_LINK_EVENT 0x48 #define QLCNIC_CMD_CONFIGURE_MAC_RX_MODE 0x49 #define QLCNIC_CMD_CONFIGURE_HW_LRO 0x4A +#define QLCNIC_CMD_SET_INGRESS_ENCAP 0x4E #define QLCNIC_CMD_INIT_NIC_FUNC 0x60 #define QLCNIC_CMD_STOP_NIC_FUNC 0x61 #define QLCNIC_CMD_IDC_ACK 0x63 @@ -161,6 +162,7 @@ struct qlcnic_host_sds_ring; struct qlcnic_host_tx_ring; struct qlcnic_hardware_context; struct qlcnic_adapter; +struct qlcnic_fw_dump; int qlcnic_82xx_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong, int *); int qlcnic_82xx_hw_write_wx_2M(struct qlcnic_adapter *, ulong, u32); @@ -213,4 +215,11 @@ int qlcnic_82xx_shutdown(struct pci_dev *); int qlcnic_82xx_resume(struct qlcnic_adapter *); void qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter, u8 failed); void qlcnic_fw_poll_work(struct work_struct *work); + +u32 qlcnic_82xx_get_saved_state(void *, u32); +void qlcnic_82xx_set_saved_state(void *, u32, u32); +void qlcnic_82xx_cache_tmpl_hdr_values(struct qlcnic_fw_dump *); +u32 qlcnic_82xx_get_cap_size(void *, int); +void qlcnic_82xx_set_sys_info(void *, int, u32); +void qlcnic_82xx_store_cap_mask(void *, u32); #endif /* __QLCNIC_HW_H_ */ diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c index 54ebf300332a..173b3d12991f 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c @@ -13,16 +13,19 @@ #include "qlcnic.h" -#define TX_ETHER_PKT 0x01 -#define TX_TCP_PKT 0x02 -#define TX_UDP_PKT 0x03 -#define TX_IP_PKT 0x04 -#define TX_TCP_LSO 0x05 -#define TX_TCP_LSO6 0x06 -#define TX_TCPV6_PKT 0x0b -#define TX_UDPV6_PKT 0x0c -#define FLAGS_VLAN_TAGGED 0x10 -#define FLAGS_VLAN_OOB 0x40 +#define QLCNIC_TX_ETHER_PKT 0x01 +#define QLCNIC_TX_TCP_PKT 0x02 +#define QLCNIC_TX_UDP_PKT 0x03 +#define QLCNIC_TX_IP_PKT 0x04 +#define QLCNIC_TX_TCP_LSO 0x05 +#define QLCNIC_TX_TCP_LSO6 0x06 +#define QLCNIC_TX_ENCAP_PKT 0x07 +#define QLCNIC_TX_ENCAP_LSO 0x08 +#define QLCNIC_TX_TCPV6_PKT 0x0b +#define QLCNIC_TX_UDPV6_PKT 0x0c + +#define QLCNIC_FLAGS_VLAN_TAGGED 0x10 +#define QLCNIC_FLAGS_VLAN_OOB 0x40 #define qlcnic_set_tx_vlan_tci(cmd_desc, v) \ (cmd_desc)->vlan_TCI = cpu_to_le16(v); @@ -364,6 +367,101 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter, spin_unlock(&adapter->mac_learn_lock); } +#define QLCNIC_ENCAP_VXLAN_PKT BIT_0 +#define QLCNIC_ENCAP_OUTER_L3_IP6 BIT_1 +#define QLCNIC_ENCAP_INNER_L3_IP6 BIT_2 +#define QLCNIC_ENCAP_INNER_L4_UDP BIT_3 +#define QLCNIC_ENCAP_DO_L3_CSUM BIT_4 +#define QLCNIC_ENCAP_DO_L4_CSUM BIT_5 + +static int qlcnic_tx_encap_pkt(struct qlcnic_adapter *adapter, + struct cmd_desc_type0 *first_desc, + struct sk_buff *skb, + struct qlcnic_host_tx_ring *tx_ring) +{ + u8 opcode = 0, inner_hdr_len = 0, outer_hdr_len = 0, total_hdr_len = 0; + int copied, copy_len, descr_size; + u32 
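+	/* snapshot of the ring producer, advanced below while the
+	 * encapsulation headers are copied into descriptors
+	 */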
producer = tx_ring->producer; + struct cmd_desc_type0 *hwdesc; + u16 flags = 0, encap_descr = 0; + + opcode = QLCNIC_TX_ETHER_PKT; + encap_descr = QLCNIC_ENCAP_VXLAN_PKT; + + if (skb_is_gso(skb)) { + inner_hdr_len = skb_inner_transport_header(skb) + + inner_tcp_hdrlen(skb) - + skb_inner_mac_header(skb); + + /* VXLAN header size = 8 */ + outer_hdr_len = skb_transport_offset(skb) + 8 + + sizeof(struct udphdr); + first_desc->outer_hdr_length = outer_hdr_len; + total_hdr_len = inner_hdr_len + outer_hdr_len; + encap_descr |= QLCNIC_ENCAP_DO_L3_CSUM | + QLCNIC_ENCAP_DO_L4_CSUM; + first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size); + first_desc->hdr_length = inner_hdr_len; + + /* Copy inner and outer headers in Tx descriptor(s) + * If total_hdr_len > cmd_desc_type0, use multiple + * descriptors + */ + copied = 0; + descr_size = (int)sizeof(struct cmd_desc_type0); + while (copied < total_hdr_len) { + copy_len = min(descr_size, (total_hdr_len - copied)); + hwdesc = &tx_ring->desc_head[producer]; + tx_ring->cmd_buf_arr[producer].skb = NULL; + skb_copy_from_linear_data_offset(skb, copied, + (char *)hwdesc, + copy_len); + copied += copy_len; + producer = get_next_index(producer, tx_ring->num_desc); + } + + tx_ring->producer = producer; + + /* Make sure updated tx_ring->producer is visible + * for qlcnic_tx_avail() + */ + smp_mb(); + adapter->stats.encap_lso_frames++; + + opcode = QLCNIC_TX_ENCAP_LSO; + } else if (skb->ip_summed == CHECKSUM_PARTIAL) { + if (inner_ip_hdr(skb)->version == 6) { + if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_UDP) + encap_descr |= QLCNIC_ENCAP_INNER_L4_UDP; + } else { + if (inner_ip_hdr(skb)->protocol == IPPROTO_UDP) + encap_descr |= QLCNIC_ENCAP_INNER_L4_UDP; + } + + adapter->stats.encap_tx_csummed++; + opcode = QLCNIC_TX_ENCAP_PKT; + } + + /* Prepare first 16 bits of byte offset 16 of Tx descriptor */ + if (ip_hdr(skb)->version == 6) + encap_descr |= QLCNIC_ENCAP_OUTER_L3_IP6; + + /* outer IP header's size in 32-bit words */ + encap_descr |= (skb_network_header_len(skb) >> 2) << 6; + + /* outer IP header offset */ + encap_descr |= skb_network_offset(skb) << 10; + first_desc->encap_descr = cpu_to_le16(encap_descr); + + first_desc->tcp_hdr_offset = skb_inner_transport_header(skb) - + skb->data; + first_desc->ip_hdr_offset = skb_inner_network_offset(skb); + + qlcnic_set_tx_flags_opcode(first_desc, flags, opcode); + + return 0; +} + static int qlcnic_tx_pkt(struct qlcnic_adapter *adapter, struct cmd_desc_type0 *first_desc, struct sk_buff *skb, struct qlcnic_host_tx_ring *tx_ring) @@ -378,11 +476,11 @@ static int qlcnic_tx_pkt(struct qlcnic_adapter *adapter, if (protocol == ETH_P_8021Q) { vh = (struct vlan_ethhdr *)skb->data; - flags = FLAGS_VLAN_TAGGED; + flags = QLCNIC_FLAGS_VLAN_TAGGED; vlan_tci = ntohs(vh->h_vlan_TCI); protocol = ntohs(vh->h_vlan_encapsulated_proto); } else if (vlan_tx_tag_present(skb)) { - flags = FLAGS_VLAN_OOB; + flags = QLCNIC_FLAGS_VLAN_OOB; vlan_tci = vlan_tx_tag_get(skb); } if (unlikely(adapter->tx_pvid)) { @@ -391,7 +489,7 @@ static int qlcnic_tx_pkt(struct qlcnic_adapter *adapter, if (vlan_tci && (adapter->flags & QLCNIC_TAGGING_ENABLED)) goto set_flags; - flags = FLAGS_VLAN_OOB; + flags = QLCNIC_FLAGS_VLAN_OOB; vlan_tci = adapter->tx_pvid; } set_flags: @@ -402,25 +500,26 @@ set_flags: flags |= BIT_0; memcpy(&first_desc->eth_addr, skb->data, ETH_ALEN); } - opcode = TX_ETHER_PKT; + opcode = QLCNIC_TX_ETHER_PKT; if (skb_is_gso(skb)) { hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); first_desc->mss =
cpu_to_le16(skb_shinfo(skb)->gso_size); - first_desc->total_hdr_length = hdr_len; - opcode = (protocol == ETH_P_IPV6) ? TX_TCP_LSO6 : TX_TCP_LSO; + first_desc->hdr_length = hdr_len; + opcode = (protocol == ETH_P_IPV6) ? QLCNIC_TX_TCP_LSO6 : + QLCNIC_TX_TCP_LSO; /* For LSO, we need to copy the MAC/IP/TCP headers into * the descriptor ring */ copied = 0; offset = 2; - if (flags & FLAGS_VLAN_OOB) { - first_desc->total_hdr_length += VLAN_HLEN; + if (flags & QLCNIC_FLAGS_VLAN_OOB) { + first_desc->hdr_length += VLAN_HLEN; first_desc->tcp_hdr_offset = VLAN_HLEN; first_desc->ip_hdr_offset = VLAN_HLEN; /* Only in case of TSO on vlan device */ - flags |= FLAGS_VLAN_TAGGED; + flags |= QLCNIC_FLAGS_VLAN_TAGGED; /* Create a TSO vlan header template for firmware */ hwdesc = &tx_ring->desc_head[producer]; @@ -464,16 +563,16 @@ set_flags: l4proto = ip_hdr(skb)->protocol; if (l4proto == IPPROTO_TCP) - opcode = TX_TCP_PKT; + opcode = QLCNIC_TX_TCP_PKT; else if (l4proto == IPPROTO_UDP) - opcode = TX_UDP_PKT; + opcode = QLCNIC_TX_UDP_PKT; } else if (protocol == ETH_P_IPV6) { l4proto = ipv6_hdr(skb)->nexthdr; if (l4proto == IPPROTO_TCP) - opcode = TX_TCPV6_PKT; + opcode = QLCNIC_TX_TCPV6_PKT; else if (l4proto == IPPROTO_UDP) - opcode = TX_UDPV6_PKT; + opcode = QLCNIC_TX_UDPV6_PKT; } } first_desc->tcp_hdr_offset += skb_transport_offset(skb); @@ -563,6 +662,8 @@ netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev) struct ethhdr *phdr; int i, k, frag_count, delta = 0; u32 producer, num_txd; + u16 protocol; + bool l4_is_udp = false; if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) { netif_tx_stop_all_queues(netdev); @@ -653,8 +754,23 @@ netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev) tx_ring->producer = get_next_index(producer, num_txd); smp_mb(); - if (unlikely(qlcnic_tx_pkt(adapter, first_desc, skb, tx_ring))) - goto unwind_buff; + protocol = ntohs(skb->protocol); + if (protocol == ETH_P_IP) + l4_is_udp = ip_hdr(skb)->protocol == IPPROTO_UDP; + else if (protocol == ETH_P_IPV6) + l4_is_udp = ipv6_hdr(skb)->nexthdr == IPPROTO_UDP; + + /* Check if it is a VXLAN packet */ + if (!skb->encapsulation || !l4_is_udp || + !qlcnic_encap_tx_offload(adapter)) { + if (unlikely(qlcnic_tx_pkt(adapter, first_desc, skb, + tx_ring))) + goto unwind_buff; + } else { + if (unlikely(qlcnic_tx_encap_pkt(adapter, first_desc, + skb, tx_ring))) + goto unwind_buff; + } if (adapter->drv_mac_learn) qlcnic_send_filter(adapter, first_desc, skb); @@ -1587,6 +1703,13 @@ static inline int qlcnic_83xx_is_lb_pkt(u64 sts_data, int lro_pkt) return (sts_data & QLC_83XX_NORMAL_LB_PKT) ? 
1 : 0; } +#define QLCNIC_ENCAP_LENGTH_MASK 0x7f + +static inline u8 qlcnic_encap_length(u64 sts_data) +{ + return sts_data & QLCNIC_ENCAP_LENGTH_MASK; +} + static struct qlcnic_rx_buffer * qlcnic_83xx_process_rcv(struct qlcnic_adapter *adapter, struct qlcnic_host_sds_ring *sds_ring, @@ -1637,6 +1760,12 @@ qlcnic_83xx_process_rcv(struct qlcnic_adapter *adapter, skb->protocol = eth_type_trans(skb, netdev); + if (qlcnic_encap_length(sts_data[1]) && + skb->ip_summed == CHECKSUM_UNNECESSARY) { + skb->encapsulation = 1; + adapter->stats.encap_rx_csummed++; + } + if (vid != 0xffff) __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index 1222865cfb73..309d05640883 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c @@ -21,6 +21,9 @@ #include <linux/aer.h> #include <linux/log2.h> #include <linux/pci.h> +#ifdef CONFIG_QLCNIC_VXLAN +#include <net/vxlan.h> +#endif MODULE_DESCRIPTION("QLogic 1/10 GbE Converged/Intelligent Ethernet Driver"); MODULE_LICENSE("GPL"); @@ -90,7 +93,6 @@ static void qlcnic_82xx_io_resume(struct pci_dev *); static void qlcnic_82xx_set_mac_filter_count(struct qlcnic_adapter *); static pci_ers_result_t qlcnic_82xx_io_error_detected(struct pci_dev *, pci_channel_state_t); - static u32 qlcnic_vlan_tx_check(struct qlcnic_adapter *adapter) { struct qlcnic_hardware_context *ahw = adapter->ahw; @@ -462,6 +464,37 @@ static int qlcnic_get_phys_port_id(struct net_device *netdev, return 0; } +#ifdef CONFIG_QLCNIC_VXLAN +static void qlcnic_add_vxlan_port(struct net_device *netdev, + sa_family_t sa_family, __be16 port) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + struct qlcnic_hardware_context *ahw = adapter->ahw; + + /* Adapter supports only one VXLAN port. 
Use very first port + * for enabling offload + */ + if (!qlcnic_encap_rx_offload(adapter) || ahw->vxlan_port) + return; + + ahw->vxlan_port = ntohs(port); + adapter->flags |= QLCNIC_ADD_VXLAN_PORT; +} + +static void qlcnic_del_vxlan_port(struct net_device *netdev, + sa_family_t sa_family, __be16 port) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + struct qlcnic_hardware_context *ahw = adapter->ahw; + + if (!qlcnic_encap_rx_offload(adapter) || !ahw->vxlan_port || + (ahw->vxlan_port != ntohs(port))) + return; + + adapter->flags |= QLCNIC_DEL_VXLAN_PORT; +} +#endif + static const struct net_device_ops qlcnic_netdev_ops = { .ndo_open = qlcnic_open, .ndo_stop = qlcnic_close, @@ -480,6 +513,10 @@ static const struct net_device_ops qlcnic_netdev_ops = { .ndo_fdb_del = qlcnic_fdb_del, .ndo_fdb_dump = qlcnic_fdb_dump, .ndo_get_phys_port_id = qlcnic_get_phys_port_id, +#ifdef CONFIG_QLCNIC_VXLAN + .ndo_add_vxlan_port = qlcnic_add_vxlan_port, + .ndo_del_vxlan_port = qlcnic_del_vxlan_port, +#endif #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = qlcnic_poll_controller, #endif @@ -561,6 +598,12 @@ static struct qlcnic_hardware_ops qlcnic_hw_ops = { .disable_sds_intr = qlcnic_82xx_disable_sds_intr, .enable_tx_intr = qlcnic_82xx_enable_tx_intr, .disable_tx_intr = qlcnic_82xx_disable_tx_intr, + .get_saved_state = qlcnic_82xx_get_saved_state, + .set_saved_state = qlcnic_82xx_set_saved_state, + .cache_tmpl_hdr_values = qlcnic_82xx_cache_tmpl_hdr_values, + .get_cap_size = qlcnic_82xx_get_cap_size, + .set_sys_info = qlcnic_82xx_set_sys_info, + .store_cap_mask = qlcnic_82xx_store_cap_mask, }; static int qlcnic_check_multi_tx_capability(struct qlcnic_adapter *adapter) @@ -684,7 +727,7 @@ restore: int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix) { struct pci_dev *pdev = adapter->pdev; - int err = -1, vector; + int err, vector; if (!adapter->msix_entries) { adapter->msix_entries = kcalloc(num_msix, @@ -701,13 +744,17 @@ enable_msix: for (vector = 0; vector < num_msix; vector++) adapter->msix_entries[vector].entry = vector; - err = pci_enable_msix(pdev, adapter->msix_entries, num_msix); - if (err == 0) { + err = pci_enable_msix_range(pdev, + adapter->msix_entries, 1, num_msix); + + if (err == num_msix) { adapter->flags |= QLCNIC_MSIX_ENABLED; adapter->ahw->num_msix = num_msix; dev_info(&pdev->dev, "using msi-x interrupts\n"); - return err; + return 0; } else if (err > 0) { + pci_disable_msix(pdev); + dev_info(&pdev->dev, "Unable to allocate %d MSI-X vectors, Available vectors %d\n", num_msix, err); @@ -715,12 +762,12 @@ enable_msix: if (qlcnic_82xx_check(adapter)) { num_msix = rounddown_pow_of_two(err); if (err < QLCNIC_82XX_MINIMUM_VECTOR) - return -EIO; + return -ENOSPC; } else { num_msix = rounddown_pow_of_two(err - 1); num_msix += 1; if (err < QLCNIC_83XX_MINIMUM_VECTOR) - return -EIO; + return -ENOSPC; } if (qlcnic_82xx_check(adapter) && @@ -747,7 +794,7 @@ enable_msix: } } - return err; + return -EIO; } static int qlcnic_82xx_calculate_msix_vector(struct qlcnic_adapter *adapter) @@ -1934,6 +1981,11 @@ qlcnic_attach(struct qlcnic_adapter *adapter) qlcnic_create_sysfs_entries(adapter); +#ifdef CONFIG_QLCNIC_VXLAN + if (qlcnic_encap_rx_offload(adapter)) + vxlan_get_rx_port(netdev); +#endif + adapter->is_up = QLCNIC_ADAPTER_UP_MAGIC; return 0; @@ -2196,6 +2248,19 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev, if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO) netdev->features |= NETIF_F_LRO; + if 
(qlcnic_encap_tx_offload(adapter)) { + netdev->features |= NETIF_F_GSO_UDP_TUNNEL; + + /* encapsulation Tx offload supported by Adapter */ + netdev->hw_enc_features = NETIF_F_IP_CSUM | + NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_TSO | + NETIF_F_TSO6; + } + + if (qlcnic_encap_rx_offload(adapter)) + netdev->hw_enc_features |= NETIF_F_RXCSUM; + netdev->hw_features = netdev->features; netdev->priv_flags |= IFF_UNICAST_FLT; netdev->irq = adapter->msix_entries[0].vector; @@ -2442,8 +2507,8 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (err) { switch (err) { case -ENOTRECOVERABLE: - dev_err(&pdev->dev, "Adapter initialization failed due to a faulty hardware. Please reboot\n"); - dev_err(&pdev->dev, "If reboot doesn't help, please replace the adapter with new one and return the faulty adapter for repair\n"); + dev_err(&pdev->dev, "Adapter initialization failed due to faulty hardware\n"); + dev_err(&pdev->dev, "Please replace the adapter with a new one and return the faulty adapter for repair\n"); goto err_out_free_hw; case -ENOMEM: dev_err(&pdev->dev, "Adapter initialization failed. Please reboot\n"); goto err_out_free_hw; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c index 7763962e2ec4..37b979b1266b 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c @@ -211,6 +211,107 @@ enum qlcnic_minidump_opcode { QLCNIC_DUMP_RDEND = 255 }; +inline u32 qlcnic_82xx_get_saved_state(void *t_hdr, u32 index) +{ + struct qlcnic_82xx_dump_template_hdr *hdr = t_hdr; + + return hdr->saved_state[index]; +} + +inline void qlcnic_82xx_set_saved_state(void *t_hdr, u32 index, + u32 value) +{ + struct qlcnic_82xx_dump_template_hdr *hdr = t_hdr; + + hdr->saved_state[index] = value; +} + +void qlcnic_82xx_cache_tmpl_hdr_values(struct qlcnic_fw_dump *fw_dump) +{ + struct qlcnic_82xx_dump_template_hdr *hdr; + + hdr = fw_dump->tmpl_hdr; + fw_dump->tmpl_hdr_size = hdr->size; + fw_dump->version = hdr->version; + fw_dump->num_entries = hdr->num_entries; + fw_dump->offset = hdr->offset; + + hdr->drv_cap_mask = hdr->cap_mask; + fw_dump->cap_mask = hdr->cap_mask; +} + +inline u32 qlcnic_82xx_get_cap_size(void *t_hdr, int index) +{ + struct qlcnic_82xx_dump_template_hdr *hdr = t_hdr; + + return hdr->cap_sizes[index]; +} + +void qlcnic_82xx_set_sys_info(void *t_hdr, int idx, u32 value) +{ + struct qlcnic_82xx_dump_template_hdr *hdr = t_hdr; + + hdr->sys_info[idx] = value; +} + +void qlcnic_82xx_store_cap_mask(void *tmpl_hdr, u32 mask) +{ + struct qlcnic_82xx_dump_template_hdr *hdr = tmpl_hdr; + + hdr->drv_cap_mask = mask; +} + +inline u32 qlcnic_83xx_get_saved_state(void *t_hdr, u32 index) +{ + struct qlcnic_83xx_dump_template_hdr *hdr = t_hdr; + + return hdr->saved_state[index]; +} + +inline void qlcnic_83xx_set_saved_state(void *t_hdr, u32 index, + u32 value) +{ + struct qlcnic_83xx_dump_template_hdr *hdr = t_hdr; + + hdr->saved_state[index] = value; +} + +void qlcnic_83xx_cache_tmpl_hdr_values(struct qlcnic_fw_dump *fw_dump) +{ + struct qlcnic_83xx_dump_template_hdr *hdr; + + hdr = fw_dump->tmpl_hdr; + fw_dump->tmpl_hdr_size = hdr->size; + fw_dump->version = hdr->version; + fw_dump->num_entries = hdr->num_entries; + fw_dump->offset = hdr->offset; + + hdr->drv_cap_mask = hdr->cap_mask; + fw_dump->cap_mask = hdr->cap_mask; +} + +inline u32 qlcnic_83xx_get_cap_size(void *t_hdr, int index) +{ + struct qlcnic_83xx_dump_template_hdr *hdr = t_hdr; + + return hdr->cap_sizes[index]; +} + +void
qlcnic_83xx_set_sys_info(void *t_hdr, int idx, u32 value) +{ + struct qlcnic_83xx_dump_template_hdr *hdr = t_hdr; + + hdr->sys_info[idx] = value; +} + +void qlcnic_83xx_store_cap_mask(void *tmpl_hdr, u32 mask) +{ + struct qlcnic_83xx_dump_template_hdr *hdr; + + hdr = tmpl_hdr; + hdr->drv_cap_mask = mask; +} + struct qlcnic_dump_operations { enum qlcnic_minidump_opcode opcode; u32 (*handler)(struct qlcnic_adapter *, struct qlcnic_dump_entry *, @@ -238,11 +339,11 @@ static u32 qlcnic_dump_crb(struct qlcnic_adapter *adapter, static u32 qlcnic_dump_ctrl(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry, __le32 *buffer) { + void *hdr = adapter->ahw->fw_dump.tmpl_hdr; + struct __ctrl *ctr = &entry->region.ctrl; int i, k, timeout = 0; - u32 addr, data; + u32 addr, data, temp; u8 no_ops; - struct __ctrl *ctr = &entry->region.ctrl; - struct qlcnic_dump_template_hdr *t_hdr = adapter->ahw->fw_dump.tmpl_hdr; addr = ctr->addr; no_ops = ctr->no_ops; @@ -285,29 +386,42 @@ static u32 qlcnic_dump_ctrl(struct qlcnic_adapter *adapter, } break; case QLCNIC_DUMP_RD_SAVE: - if (ctr->index_a) - addr = t_hdr->saved_state[ctr->index_a]; + temp = ctr->index_a; + if (temp) + addr = qlcnic_get_saved_state(adapter, + hdr, + temp); data = qlcnic_ind_rd(adapter, addr); - t_hdr->saved_state[ctr->index_v] = data; + qlcnic_set_saved_state(adapter, hdr, + ctr->index_v, data); break; case QLCNIC_DUMP_WRT_SAVED: - if (ctr->index_v) - data = t_hdr->saved_state[ctr->index_v]; + temp = ctr->index_v; + if (temp) + data = qlcnic_get_saved_state(adapter, + hdr, + temp); else data = ctr->val1; - if (ctr->index_a) - addr = t_hdr->saved_state[ctr->index_a]; + + temp = ctr->index_a; + if (temp) + addr = qlcnic_get_saved_state(adapter, + hdr, + temp); qlcnic_ind_wr(adapter, addr, data); break; case QLCNIC_DUMP_MOD_SAVE_ST: - data = t_hdr->saved_state[ctr->index_v]; + data = qlcnic_get_saved_state(adapter, hdr, + ctr->index_v); data <<= ctr->shl_val; data >>= ctr->shr_val; if (ctr->val2) data &= ctr->val2; data |= ctr->val3; data += ctr->val1; - t_hdr->saved_state[ctr->index_v] = data; + qlcnic_set_saved_state(adapter, hdr, + ctr->index_v, data); break; default: dev_info(&adapter->pdev->dev, @@ -544,7 +658,7 @@ out: static int qlcnic_start_pex_dma(struct qlcnic_adapter *adapter, struct __mem *mem) { - struct qlcnic_dump_template_hdr *tmpl_hdr; + struct qlcnic_83xx_dump_template_hdr *tmpl_hdr; struct device *dev = &adapter->pdev->dev; u32 dma_no, dma_base_addr, temp_addr; int i, ret, dma_sts; @@ -596,7 +710,7 @@ static u32 qlcnic_read_memory_pexdma(struct qlcnic_adapter *adapter, struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump; u32 temp, dma_base_addr, size = 0, read_size = 0; struct qlcnic_pex_dma_descriptor *dma_descr; - struct qlcnic_dump_template_hdr *tmpl_hdr; + struct qlcnic_83xx_dump_template_hdr *tmpl_hdr; struct device *dev = &adapter->pdev->dev; dma_addr_t dma_phys_addr; void *dma_buffer; @@ -938,8 +1052,8 @@ static int qlcnic_fw_flash_get_minidump_temp_size(struct qlcnic_adapter *adapter, struct qlcnic_cmd_args *cmd) { - struct qlcnic_dump_template_hdr tmp_hdr; - u32 size = sizeof(struct qlcnic_dump_template_hdr) / sizeof(u32); + struct qlcnic_83xx_dump_template_hdr tmp_hdr; + u32 size = sizeof(tmp_hdr) / sizeof(u32); int ret = 0; if (qlcnic_82xx_check(adapter)) @@ -1027,17 +1141,19 @@ free_mem: return err; } +#define QLCNIC_TEMPLATE_VERSION (0x20001) + int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter) { - int err; - u32 temp_size = 0; - u32 version, csum, *tmp_buf; struct 
qlcnic_hardware_context *ahw; - struct qlcnic_dump_template_hdr *tmpl_hdr; + struct qlcnic_fw_dump *fw_dump; + u32 version, csum, *tmp_buf; u8 use_flash_temp = 0; + u32 temp_size = 0; + int err; ahw = adapter->ahw; - + fw_dump = &ahw->fw_dump; err = qlcnic_fw_get_minidump_temp_size(adapter, &version, &temp_size, &use_flash_temp); if (err) { @@ -1046,11 +1162,11 @@ int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter) return -EIO; } - ahw->fw_dump.tmpl_hdr = vzalloc(temp_size); - if (!ahw->fw_dump.tmpl_hdr) + fw_dump->tmpl_hdr = vzalloc(temp_size); + if (!fw_dump->tmpl_hdr) return -ENOMEM; - tmp_buf = (u32 *)ahw->fw_dump.tmpl_hdr; + tmp_buf = (u32 *)fw_dump->tmpl_hdr; if (use_flash_temp) goto flash_temp; @@ -1065,8 +1181,8 @@ flash_temp: dev_err(&adapter->pdev->dev, "Failed to get minidump template header %d\n", err); - vfree(ahw->fw_dump.tmpl_hdr); - ahw->fw_dump.tmpl_hdr = NULL; + vfree(fw_dump->tmpl_hdr); + fw_dump->tmpl_hdr = NULL; return -EIO; } } @@ -1076,21 +1192,22 @@ flash_temp: if (csum) { dev_err(&adapter->pdev->dev, "Template header checksum validation failed\n"); - vfree(ahw->fw_dump.tmpl_hdr); - ahw->fw_dump.tmpl_hdr = NULL; + vfree(fw_dump->tmpl_hdr); + fw_dump->tmpl_hdr = NULL; return -EIO; } - tmpl_hdr = ahw->fw_dump.tmpl_hdr; - tmpl_hdr->drv_cap_mask = tmpl_hdr->cap_mask; + qlcnic_cache_tmpl_hdr_values(adapter, fw_dump); + dev_info(&adapter->pdev->dev, "Default minidump capture mask 0x%x\n", - tmpl_hdr->cap_mask); + fw_dump->cap_mask); - if ((tmpl_hdr->version & 0xfffff) >= 0x20001) - ahw->fw_dump.use_pex_dma = true; + if (qlcnic_83xx_check(adapter) && + (fw_dump->version & 0xfffff) >= QLCNIC_TEMPLATE_VERSION) + fw_dump->use_pex_dma = true; else - ahw->fw_dump.use_pex_dma = false; + fw_dump->use_pex_dma = false; qlcnic_enable_fw_dump_state(adapter); @@ -1099,21 +1216,22 @@ flash_temp: int qlcnic_dump_fw(struct qlcnic_adapter *adapter) { - __le32 *buffer; - u32 ocm_window; - char mesg[64]; - char *msg[] = {mesg, NULL}; - int i, k, ops_cnt, ops_index, dump_size = 0; - u32 entry_offset, dump, no_entries, buf_offset = 0; - struct qlcnic_dump_entry *entry; struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump; - struct qlcnic_dump_template_hdr *tmpl_hdr = fw_dump->tmpl_hdr; static const struct qlcnic_dump_operations *fw_dump_ops; + struct qlcnic_83xx_dump_template_hdr *hdr_83xx; + u32 entry_offset, dump, no_entries, buf_offset = 0; + int i, k, ops_cnt, ops_index, dump_size = 0; struct device *dev = &adapter->pdev->dev; struct qlcnic_hardware_context *ahw; - void *temp_buffer; + struct qlcnic_dump_entry *entry; + void *temp_buffer, *tmpl_hdr; + u32 ocm_window; + __le32 *buffer; + char mesg[64]; + char *msg[] = {mesg, NULL}; ahw = adapter->ahw; + tmpl_hdr = fw_dump->tmpl_hdr; /* Return if we don't have firmware dump template header */ if (!tmpl_hdr) @@ -1133,8 +1251,9 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter) netif_info(adapter->ahw, drv, adapter->netdev, "Take FW dump\n"); /* Calculate the size for dump data area only */ for (i = 2, k = 1; (i & QLCNIC_DUMP_MASK_MAX); i <<= 1, k++) - if (i & tmpl_hdr->drv_cap_mask) - dump_size += tmpl_hdr->cap_sizes[k]; + if (i & fw_dump->cap_mask) + dump_size += qlcnic_get_cap_size(adapter, tmpl_hdr, k); + if (!dump_size) return -EIO; @@ -1144,10 +1263,10 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter) buffer = fw_dump->data; fw_dump->size = dump_size; - no_entries = tmpl_hdr->num_entries; - entry_offset = tmpl_hdr->offset; - tmpl_hdr->sys_info[0] = QLCNIC_DRIVER_VERSION; - tmpl_hdr->sys_info[1] = 
adapter->fw_version; + no_entries = fw_dump->num_entries; + entry_offset = fw_dump->offset; + qlcnic_set_sys_info(adapter, tmpl_hdr, 0, QLCNIC_DRIVER_VERSION); + qlcnic_set_sys_info(adapter, tmpl_hdr, 1, adapter->fw_version); if (fw_dump->use_pex_dma) { temp_buffer = dma_alloc_coherent(dev, QLC_PEX_DMA_READ_SIZE, @@ -1163,16 +1282,17 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter) ops_cnt = ARRAY_SIZE(qlcnic_fw_dump_ops); fw_dump_ops = qlcnic_fw_dump_ops; } else { + hdr_83xx = tmpl_hdr; ops_cnt = ARRAY_SIZE(qlcnic_83xx_fw_dump_ops); fw_dump_ops = qlcnic_83xx_fw_dump_ops; - ocm_window = tmpl_hdr->ocm_wnd_reg[adapter->ahw->pci_func]; - tmpl_hdr->saved_state[QLC_83XX_OCM_INDEX] = ocm_window; - tmpl_hdr->saved_state[QLC_83XX_PCI_INDEX] = ahw->pci_func; + ocm_window = hdr_83xx->ocm_wnd_reg[ahw->pci_func]; + hdr_83xx->saved_state[QLC_83XX_OCM_INDEX] = ocm_window; + hdr_83xx->saved_state[QLC_83XX_PCI_INDEX] = ahw->pci_func; } for (i = 0; i < no_entries; i++) { - entry = (void *)tmpl_hdr + entry_offset; - if (!(entry->hdr.mask & tmpl_hdr->drv_cap_mask)) { + entry = tmpl_hdr + entry_offset; + if (!(entry->hdr.mask & fw_dump->cap_mask)) { entry->hdr.flags |= QLCNIC_DUMP_SKIP; entry_offset += entry->hdr.offset; continue; @@ -1209,8 +1329,9 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter) fw_dump->clr = 1; snprintf(mesg, sizeof(mesg), "FW_DUMP=%s", adapter->netdev->name); - dev_info(dev, "%s: Dump data %d bytes captured, template header size %d bytes\n", - adapter->netdev->name, fw_dump->size, tmpl_hdr->size); + netdev_info(adapter->netdev, + "Dump data %d bytes captured, template header size %d bytes\n", + fw_dump->size, fw_dump->tmpl_hdr_size); /* Send a udev event to notify availability of FW dump */ kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, msg); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c index e5277a632671..14f748cbf0de 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c @@ -15,6 +15,7 @@ #define QLC_MAC_OPCODE_MASK 0x7 #define QLC_VF_FLOOD_BIT BIT_16 #define QLC_FLOOD_MODE 0x5 +#define QLC_SRIOV_ALLOW_VLAN0 BIT_19 static int qlcnic_sriov_pf_get_vport_handle(struct qlcnic_adapter *, u8); @@ -335,8 +336,11 @@ static int qlcnic_sriov_pf_cfg_vlan_filtering(struct qlcnic_adapter *adapter, return err; cmd.req.arg[1] = 0x4; - if (enable) + if (enable) { cmd.req.arg[1] |= BIT_16; + if (qlcnic_84xx_check(adapter)) + cmd.req.arg[1] |= QLC_SRIOV_ALLOW_VLAN0; + } err = qlcnic_issue_cmd(adapter, &cmd); if (err) diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c index 3d64113a35af..448d156c3d08 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c @@ -350,33 +350,15 @@ static ssize_t qlcnic_sysfs_write_mem(struct file *filp, struct kobject *kobj, return size; } -static u32 qlcnic_get_pci_func_count(struct qlcnic_adapter *adapter) -{ - struct qlcnic_hardware_context *ahw = adapter->ahw; - u32 count = 0; - - if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) - return ahw->total_nic_func; - - if (ahw->total_pci_func <= QLC_DEFAULT_VNIC_COUNT) - count = QLC_DEFAULT_VNIC_COUNT; - else - count = ahw->max_vnic_func; - - return count; -} - int qlcnic_is_valid_nic_func(struct qlcnic_adapter *adapter, u8 pci_func) { - u32 pci_func_count = qlcnic_get_pci_func_count(adapter); int i; - for (i = 0; i < pci_func_count; i++) { + for (i = 0; 
i < adapter->ahw->max_vnic_func; i++) { if (adapter->npars[i].pci_func == pci_func) return i; } - - return -1; + return -EINVAL; } static int validate_pm_config(struct qlcnic_adapter *adapter, @@ -464,23 +446,21 @@ static ssize_t qlcnic_sysfs_read_pm_config(struct file *filp, { struct device *dev = container_of(kobj, struct device, kobj); struct qlcnic_adapter *adapter = dev_get_drvdata(dev); - u32 pci_func_count = qlcnic_get_pci_func_count(adapter); struct qlcnic_pm_func_cfg *pm_cfg; - int i, pm_cfg_size; u8 pci_func; + u32 count; + int i; - pm_cfg_size = pci_func_count * sizeof(*pm_cfg); - if (size != pm_cfg_size) - return QL_STATUS_INVALID_PARAM; - - memset(buf, 0, pm_cfg_size); + memset(buf, 0, size); pm_cfg = (struct qlcnic_pm_func_cfg *)buf; - - for (i = 0; i < pci_func_count; i++) { + count = size / sizeof(struct qlcnic_pm_func_cfg); + for (i = 0; i < adapter->ahw->total_nic_func; i++) { pci_func = adapter->npars[i].pci_func; - if (!adapter->npars[i].active) + if (pci_func >= count) { + dev_dbg(dev, "%s: Total nic functions[%d], App sent function count[%d]\n", + __func__, adapter->ahw->total_nic_func, count); continue; - + } if (!adapter->npars[i].eswitch_status) continue; @@ -494,7 +474,6 @@ static ssize_t qlcnic_sysfs_read_pm_config(struct file *filp, static int validate_esw_config(struct qlcnic_adapter *adapter, struct qlcnic_esw_func_cfg *esw_cfg, int count) { - u32 pci_func_count = qlcnic_get_pci_func_count(adapter); struct qlcnic_hardware_context *ahw = adapter->ahw; int i, ret; u32 op_mode; @@ -507,7 +486,7 @@ static int validate_esw_config(struct qlcnic_adapter *adapter, for (i = 0; i < count; i++) { pci_func = esw_cfg[i].pci_func; - if (pci_func >= pci_func_count) + if (pci_func >= ahw->max_vnic_func) return QL_STATUS_INVALID_PARAM; if (adapter->ahw->op_mode == QLCNIC_MGMT_FUNC) @@ -642,23 +621,21 @@ static ssize_t qlcnic_sysfs_read_esw_config(struct file *file, { struct device *dev = container_of(kobj, struct device, kobj); struct qlcnic_adapter *adapter = dev_get_drvdata(dev); - u32 pci_func_count = qlcnic_get_pci_func_count(adapter); struct qlcnic_esw_func_cfg *esw_cfg; - size_t esw_cfg_size; - u8 i, pci_func; - - esw_cfg_size = pci_func_count * sizeof(*esw_cfg); - if (size != esw_cfg_size) - return QL_STATUS_INVALID_PARAM; + u8 pci_func; + u32 count; + int i; - memset(buf, 0, esw_cfg_size); + memset(buf, 0, size); esw_cfg = (struct qlcnic_esw_func_cfg *)buf; - - for (i = 0; i < pci_func_count; i++) { + count = size / sizeof(struct qlcnic_esw_func_cfg); + for (i = 0; i < adapter->ahw->total_nic_func; i++) { pci_func = adapter->npars[i].pci_func; - if (!adapter->npars[i].active) + if (pci_func >= count) { + dev_dbg(dev, "%s: Total nic functions[%d], App sent function count[%d]\n", + __func__, adapter->ahw->total_nic_func, count); continue; - + } if (!adapter->npars[i].eswitch_status) continue; @@ -741,23 +718,24 @@ static ssize_t qlcnic_sysfs_read_npar_config(struct file *file, { struct device *dev = container_of(kobj, struct device, kobj); struct qlcnic_adapter *adapter = dev_get_drvdata(dev); - u32 pci_func_count = qlcnic_get_pci_func_count(adapter); struct qlcnic_npar_func_cfg *np_cfg; struct qlcnic_info nic_info; - size_t np_cfg_size; int i, ret; - - np_cfg_size = pci_func_count * sizeof(*np_cfg); - if (size != np_cfg_size) - return QL_STATUS_INVALID_PARAM; + u32 count; memset(&nic_info, 0, sizeof(struct qlcnic_info)); - memset(buf, 0, np_cfg_size); + memset(buf, 0, size); np_cfg = (struct qlcnic_npar_func_cfg *)buf; - for (i = 0; i < pci_func_count; i++) { + count = 
size / sizeof(struct qlcnic_npar_func_cfg); + for (i = 0; i < adapter->ahw->total_nic_func; i++) { if (qlcnic_is_valid_nic_func(adapter, i) < 0) continue; + if (adapter->npars[i].pci_func >= count) { + dev_dbg(dev, "%s: Total nic functions[%d], App sent function count[%d]\n", + __func__, adapter->ahw->total_nic_func, count); + continue; + } ret = qlcnic_get_nic_info(adapter, &nic_info, i); if (ret) return ret; @@ -783,7 +761,6 @@ static ssize_t qlcnic_sysfs_get_port_stats(struct file *file, { struct device *dev = container_of(kobj, struct device, kobj); struct qlcnic_adapter *adapter = dev_get_drvdata(dev); - u32 pci_func_count = qlcnic_get_pci_func_count(adapter); struct qlcnic_esw_statistics port_stats; int ret; @@ -793,7 +770,7 @@ static ssize_t qlcnic_sysfs_get_port_stats(struct file *file, if (size != sizeof(struct qlcnic_esw_statistics)) return QL_STATUS_INVALID_PARAM; - if (offset >= pci_func_count) + if (offset >= adapter->ahw->max_vnic_func) return QL_STATUS_INVALID_PARAM; memset(&port_stats, 0, size); @@ -884,13 +861,12 @@ static ssize_t qlcnic_sysfs_clear_port_stats(struct file *file, struct device *dev = container_of(kobj, struct device, kobj); struct qlcnic_adapter *adapter = dev_get_drvdata(dev); - u32 pci_func_count = qlcnic_get_pci_func_count(adapter); int ret; if (qlcnic_83xx_check(adapter)) return QLC_STATUS_UNSUPPORTED_CMD; - if (offset >= pci_func_count) + if (offset >= adapter->ahw->max_vnic_func) return QL_STATUS_INVALID_PARAM; ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_PORT, offset, @@ -914,17 +890,12 @@ static ssize_t qlcnic_sysfs_read_pci_config(struct file *file, { struct device *dev = container_of(kobj, struct device, kobj); struct qlcnic_adapter *adapter = dev_get_drvdata(dev); - u32 pci_func_count = qlcnic_get_pci_func_count(adapter); struct qlcnic_pci_func_cfg *pci_cfg; struct qlcnic_pci_info *pci_info; - size_t pci_cfg_sz; int i, ret; + u32 count; - pci_cfg_sz = pci_func_count * sizeof(*pci_cfg); - if (size != pci_cfg_sz) - return QL_STATUS_INVALID_PARAM; - - pci_info = kcalloc(pci_func_count, sizeof(*pci_info), GFP_KERNEL); + pci_info = kcalloc(size, sizeof(*pci_info), GFP_KERNEL); if (!pci_info) return -ENOMEM; @@ -935,7 +906,8 @@ static ssize_t qlcnic_sysfs_read_pci_config(struct file *file, } pci_cfg = (struct qlcnic_pci_func_cfg *)buf; - for (i = 0; i < pci_func_count; i++) { + count = size / sizeof(struct qlcnic_pci_func_cfg); + for (i = 0; i < count; i++) { pci_cfg[i].pci_func = pci_info[i].id; pci_cfg[i].func_type = pci_info[i].type; pci_cfg[i].func_state = 0; diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c index ce2cfddbed50..0a1d76acab81 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c +++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c @@ -2556,11 +2556,10 @@ static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr) if (skb_is_gso(skb)) { int err; - if (skb_header_cloned(skb)) { - err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); - if (err) - return err; - } + + err = skb_cow_head(skb, 0); + if (err < 0) + return err; mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB; mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC; @@ -3331,24 +3330,16 @@ static void ql_enable_msix(struct ql_adapter *qdev) for (i = 0; i < qdev->intr_count; i++) qdev->msi_x_entry[i].entry = i; - /* Loop to get our vectors. We start with - * what we want and settle for what we get. 
- */ - do { - err = pci_enable_msix(qdev->pdev, - qdev->msi_x_entry, qdev->intr_count); - if (err > 0) - qdev->intr_count = err; - } while (err > 0); - + err = pci_enable_msix_range(qdev->pdev, qdev->msi_x_entry, + 1, qdev->intr_count); if (err < 0) { kfree(qdev->msi_x_entry); qdev->msi_x_entry = NULL; netif_warn(qdev, ifup, qdev->ndev, "MSI-X Enable failed, trying MSI.\n"); - qdev->intr_count = 1; qlge_irq_type = MSI_IRQ; - } else if (err == 0) { + } else { + qdev->intr_count = err; set_bit(QL_MSIX_ENABLED, &qdev->flags); netif_info(qdev, ifup, qdev->ndev, "MSI-X Enabled, got %d vectors.\n", @@ -4765,7 +4756,9 @@ static int qlge_probe(struct pci_dev *pdev, ndev->features = ndev->hw_features; ndev->vlan_features = ndev->hw_features; /* vlan gets same features (except vlan filter) */ - ndev->vlan_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER; + ndev->vlan_features &= ~(NETIF_F_HW_VLAN_CTAG_FILTER | + NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX); if (test_bit(QL_DMA64, &qdev->flags)) ndev->features |= NETIF_F_HIGHDMA; diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c index 819b74cefd64..cd045ecb9816 100644 --- a/drivers/net/ethernet/rdc/r6040.c +++ b/drivers/net/ethernet/rdc/r6040.c @@ -270,11 +270,6 @@ static int r6040_mdiobus_write(struct mii_bus *bus, int phy_addr, return r6040_phy_write(ioaddr, phy_addr, reg, value); } -static int r6040_mdiobus_reset(struct mii_bus *bus) -{ - return 0; -} - static void r6040_free_txbufs(struct net_device *dev) { struct r6040_private *lp = netdev_priv(dev); @@ -1191,7 +1186,6 @@ static int r6040_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) lp->mii_bus->priv = dev; lp->mii_bus->read = r6040_mdiobus_read; lp->mii_bus->write = r6040_mdiobus_write; - lp->mii_bus->reset = r6040_mdiobus_reset; lp->mii_bus->name = "r6040_eth_mii"; snprintf(lp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", dev_name(&pdev->dev), card_idx); diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c index 737c1a881f78..2bc728e65e24 100644 --- a/drivers/net/ethernet/realtek/8139cp.c +++ b/drivers/net/ethernet/realtek/8139cp.c @@ -476,7 +476,7 @@ rx_status_loop: rx = 0; cpw16(IntrStatus, cp_rx_intr_mask); - while (1) { + while (rx < budget) { u32 status, len; dma_addr_t mapping, new_mapping; struct sk_buff *skb, *new_skb; @@ -554,9 +554,6 @@ rx_next: else desc->opts1 = cpu_to_le32(DescOwn | cp->rx_buf_sz); rx_tail = NEXT_RX(rx_tail); - - if (rx >= budget) - break; } cp->rx_tail = rx_tail; @@ -899,7 +896,7 @@ out_unlock: return NETDEV_TX_OK; out_dma_error: - kfree_skb(skb); + dev_kfree_skb_any(skb); cp->dev->stats.tx_dropped++; goto out_unlock; } diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c index da5972eefdd2..2e5df148af4c 100644 --- a/drivers/net/ethernet/realtek/8139too.c +++ b/drivers/net/ethernet/realtek/8139too.c @@ -1717,9 +1717,9 @@ static netdev_tx_t rtl8139_start_xmit (struct sk_buff *skb, if (len < ETH_ZLEN) memset(tp->tx_buf[entry], 0, ETH_ZLEN); skb_copy_and_csum_dev(skb, tp->tx_buf[entry]); - dev_kfree_skb(skb); + dev_kfree_skb_any(skb); } else { - dev_kfree_skb(skb); + dev_kfree_skb_any(skb); dev->stats.tx_dropped++; return NETDEV_TX_OK; } @@ -2522,16 +2522,16 @@ rtl8139_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) netdev_stats_to_stats64(stats, &dev->stats); do { - start = u64_stats_fetch_begin_bh(&tp->rx_stats.syncp); + start = u64_stats_fetch_begin_irq(&tp->rx_stats.syncp); stats->rx_packets = tp->rx_stats.packets; 
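[Editorial example, not part of the diff] The 8139too hunk here and the r8169 hunks below switch the 64-bit stats snapshot loops from the _bh to the _irq u64_stats fetch helpers; the reader-side retry pattern itself is unchanged. A self-contained sketch of that pattern, with made-up field names:

        #include <linux/types.h>
        #include <linux/u64_stats_sync.h>

        struct demo_ring_stats {
                u64 packets;
                u64 bytes;
                struct u64_stats_sync syncp;
        };

        static void demo_snapshot(const struct demo_ring_stats *s,
                                  u64 *packets, u64 *bytes)
        {
                unsigned int start;

                do {
                        /* retry until no writer raced the two reads */
                        start = u64_stats_fetch_begin_irq(&s->syncp);
                        *packets = s->packets;
                        *bytes = s->bytes;
                } while (u64_stats_fetch_retry_irq(&s->syncp, start));
        }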
stats->rx_bytes = tp->rx_stats.bytes; - } while (u64_stats_fetch_retry_bh(&tp->rx_stats.syncp, start)); + } while (u64_stats_fetch_retry_irq(&tp->rx_stats.syncp, start)); do { - start = u64_stats_fetch_begin_bh(&tp->tx_stats.syncp); + start = u64_stats_fetch_begin_irq(&tp->tx_stats.syncp); stats->tx_packets = tp->tx_stats.packets; stats->tx_bytes = tp->tx_stats.bytes; - } while (u64_stats_fetch_retry_bh(&tp->tx_stats.syncp, start)); + } while (u64_stats_fetch_retry_irq(&tp->tx_stats.syncp, start)); return stats; } diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index e9779653cd4c..aa1c079f231d 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -209,7 +209,7 @@ static const struct { [RTL_GIGA_MAC_VER_16] = _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K, true), [RTL_GIGA_MAC_VER_17] = - _R("RTL8168b/8111b", RTL_TD_1, NULL, JUMBO_4K, false), + _R("RTL8168b/8111b", RTL_TD_0, NULL, JUMBO_4K, false), [RTL_GIGA_MAC_VER_18] = _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K, false), [RTL_GIGA_MAC_VER_19] = @@ -5834,7 +5834,7 @@ static void rtl8169_tx_clear_range(struct rtl8169_private *tp, u32 start, tp->TxDescArray + entry); if (skb) { tp->dev->stats.tx_dropped++; - dev_kfree_skb(skb); + dev_kfree_skb_any(skb); tx_skb->skb = NULL; } } @@ -6059,7 +6059,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, err_dma_1: rtl8169_unmap_tx_skb(d, tp->tx_skb + entry, txd); err_dma_0: - dev_kfree_skb(skb); + dev_kfree_skb_any(skb); err_update_stats: dev->stats.tx_dropped++; return NETDEV_TX_OK; @@ -6142,7 +6142,7 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp) tp->tx_stats.packets++; tp->tx_stats.bytes += tx_skb->skb->len; u64_stats_update_end(&tp->tx_stats.syncp); - dev_kfree_skb(tx_skb->skb); + dev_kfree_skb_any(tx_skb->skb); tx_skb->skb = NULL; } dirty_tx++; @@ -6590,17 +6590,17 @@ rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) rtl8169_rx_missed(dev, ioaddr); do { - start = u64_stats_fetch_begin_bh(&tp->rx_stats.syncp); + start = u64_stats_fetch_begin_irq(&tp->rx_stats.syncp); stats->rx_packets = tp->rx_stats.packets; stats->rx_bytes = tp->rx_stats.bytes; - } while (u64_stats_fetch_retry_bh(&tp->rx_stats.syncp, start)); + } while (u64_stats_fetch_retry_irq(&tp->rx_stats.syncp, start)); do { - start = u64_stats_fetch_begin_bh(&tp->tx_stats.syncp); + start = u64_stats_fetch_begin_irq(&tp->tx_stats.syncp); stats->tx_packets = tp->tx_stats.packets; stats->tx_bytes = tp->tx_stats.bytes; - } while (u64_stats_fetch_retry_bh(&tp->tx_stats.syncp, start)); + } while (u64_stats_fetch_retry_irq(&tp->tx_stats.syncp, start)); stats->rx_dropped = dev->stats.rx_dropped; stats->tx_dropped = dev->stats.tx_dropped; diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 040cb94e8219..6a9509ccd33b 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -1,8 +1,9 @@ /* SuperH Ethernet device driver * * Copyright (C) 2006-2012 Nobuhiro Iwamatsu - * Copyright (C) 2008-2013 Renesas Solutions Corp. - * Copyright (C) 2013 Cogent Embedded, Inc. + * Copyright (C) 2008-2014 Renesas Solutions Corp. + * Copyright (C) 2013-2014 Cogent Embedded, Inc. 
+ * Copyright (C) 2014 Codethink Limited * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -27,6 +28,10 @@ #include <linux/platform_device.h> #include <linux/mdio-bitbang.h> #include <linux/netdevice.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/of_irq.h> +#include <linux/of_net.h> #include <linux/phy.h> #include <linux/cache.h> #include <linux/io.h> @@ -36,6 +41,7 @@ #include <linux/if_vlan.h> #include <linux/clk.h> #include <linux/sh_eth.h> +#include <linux/of_mdio.h> #include "sh_eth.h" @@ -394,7 +400,8 @@ static void sh_eth_select_mii(struct net_device *ndev) value = 0x0; break; default: - pr_warn("PHY interface mode was not setup. Set to MII.\n"); + netdev_warn(ndev, + "PHY interface mode was not setup. Set to MII.\n"); value = 0x1; break; } @@ -848,7 +855,7 @@ static int sh_eth_check_reset(struct net_device *ndev) cnt--; } if (cnt <= 0) { - pr_err("Device reset failed\n"); + netdev_err(ndev, "Device reset failed\n"); ret = -ETIMEDOUT; } return ret; @@ -866,7 +873,7 @@ static int sh_eth_reset(struct net_device *ndev) ret = sh_eth_check_reset(ndev); if (ret) - goto out; + return ret; /* Table Init */ sh_eth_write(ndev, 0x0, TDLAR); @@ -893,7 +900,6 @@ static int sh_eth_reset(struct net_device *ndev) EDMR); } -out: return ret; } @@ -1257,7 +1263,7 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start) /* Soft Reset */ ret = sh_eth_reset(ndev); if (ret) - goto out; + return ret; if (mdp->cd->rmiimode) sh_eth_write(ndev, 0x1, RMIIMODE); @@ -1336,7 +1342,6 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start) netif_start_queue(ndev); } -out: return ret; } @@ -1550,8 +1555,7 @@ ignore_link: /* Unused write back interrupt */ if (intr_status & EESR_TABT) { /* Transmit Abort int */ ndev->stats.tx_aborted_errors++; - if (netif_msg_tx_err(mdp)) - dev_err(&ndev->dev, "Transmit Abort\n"); + netif_err(mdp, tx_err, ndev, "Transmit Abort\n"); } } @@ -1560,45 +1564,38 @@ ignore_link: if (intr_status & EESR_RFRMER) { /* Receive Frame Overflow int */ ndev->stats.rx_frame_errors++; - if (netif_msg_rx_err(mdp)) - dev_err(&ndev->dev, "Receive Abort\n"); + netif_err(mdp, rx_err, ndev, "Receive Abort\n"); } } if (intr_status & EESR_TDE) { /* Transmit Descriptor Empty int */ ndev->stats.tx_fifo_errors++; - if (netif_msg_tx_err(mdp)) - dev_err(&ndev->dev, "Transmit Descriptor Empty\n"); + netif_err(mdp, tx_err, ndev, "Transmit Descriptor Empty\n"); } if (intr_status & EESR_TFE) { /* FIFO under flow */ ndev->stats.tx_fifo_errors++; - if (netif_msg_tx_err(mdp)) - dev_err(&ndev->dev, "Transmit FIFO Under flow\n"); + netif_err(mdp, tx_err, ndev, "Transmit FIFO Under flow\n"); } if (intr_status & EESR_RDE) { /* Receive Descriptor Empty int */ ndev->stats.rx_over_errors++; - - if (netif_msg_rx_err(mdp)) - dev_err(&ndev->dev, "Receive Descriptor Empty\n"); + netif_err(mdp, rx_err, ndev, "Receive Descriptor Empty\n"); } if (intr_status & EESR_RFE) { /* Receive FIFO Overflow int */ ndev->stats.rx_fifo_errors++; - if (netif_msg_rx_err(mdp)) - dev_err(&ndev->dev, "Receive FIFO Overflow\n"); + netif_err(mdp, rx_err, ndev, "Receive FIFO Overflow\n"); } if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) { /* Address Error */ ndev->stats.tx_fifo_errors++; - if (netif_msg_tx_err(mdp)) - dev_err(&ndev->dev, "Address Error\n"); + netif_err(mdp, tx_err, ndev, "Address Error\n"); } mask = EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | EESR_TFE; @@ -1609,9 +1606,9 @@ ignore_link: u32 
edtrr = sh_eth_read(ndev, EDTRR); /* dmesg */ - dev_err(&ndev->dev, "TX error. status=%8.8x cur_tx=%8.8x dirty_tx=%8.8x state=%8.8x EDTRR=%8.8x.\n", - intr_status, mdp->cur_tx, mdp->dirty_tx, - (u32)ndev->state, edtrr); + netdev_err(ndev, "TX error. status=%8.8x cur_tx=%8.8x dirty_tx=%8.8x state=%8.8x EDTRR=%8.8x.\n", + intr_status, mdp->cur_tx, mdp->dirty_tx, + (u32)ndev->state, edtrr); /* dirty buffer free */ sh_eth_txfree(ndev); @@ -1656,9 +1653,9 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev) EESIPR); __napi_schedule(&mdp->napi); } else { - dev_warn(&ndev->dev, - "ignoring interrupt, status 0x%08lx, mask 0x%08lx.\n", - intr_status, intr_enable); + netdev_warn(ndev, + "ignoring interrupt, status 0x%08lx, mask 0x%08lx.\n", + intr_status, intr_enable); } } @@ -1757,27 +1754,42 @@ static void sh_eth_adjust_link(struct net_device *ndev) /* PHY init function */ static int sh_eth_phy_init(struct net_device *ndev) { + struct device_node *np = ndev->dev.parent->of_node; struct sh_eth_private *mdp = netdev_priv(ndev); - char phy_id[MII_BUS_ID_SIZE + 3]; struct phy_device *phydev = NULL; - snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT, - mdp->mii_bus->id, mdp->phy_id); - mdp->link = 0; mdp->speed = 0; mdp->duplex = -1; /* Try connect to PHY */ - phydev = phy_connect(ndev, phy_id, sh_eth_adjust_link, - mdp->phy_interface); + if (np) { + struct device_node *pn; + + pn = of_parse_phandle(np, "phy-handle", 0); + phydev = of_phy_connect(ndev, pn, + sh_eth_adjust_link, 0, + mdp->phy_interface); + + if (!phydev) + phydev = ERR_PTR(-ENOENT); + } else { + char phy_id[MII_BUS_ID_SIZE + 3]; + + snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT, + mdp->mii_bus->id, mdp->phy_id); + + phydev = phy_connect(ndev, phy_id, sh_eth_adjust_link, + mdp->phy_interface); + } + if (IS_ERR(phydev)) { - dev_err(&ndev->dev, "phy_connect failed\n"); + netdev_err(ndev, "failed to connect PHY\n"); return PTR_ERR(phydev); } - dev_info(&ndev->dev, "attached PHY %d (IRQ %d) to driver %s\n", - phydev->addr, phydev->irq, phydev->drv->name); + netdev_info(ndev, "attached PHY %d (IRQ %d) to driver %s\n", + phydev->addr, phydev->irq, phydev->drv->name); mdp->phydev = phydev; @@ -1958,12 +1970,12 @@ static int sh_eth_set_ringparam(struct net_device *ndev, ret = sh_eth_ring_init(ndev); if (ret < 0) { - dev_err(&ndev->dev, "%s: sh_eth_ring_init failed.\n", __func__); + netdev_err(ndev, "%s: sh_eth_ring_init failed.\n", __func__); return ret; } ret = sh_eth_dev_init(ndev, false); if (ret < 0) { - dev_err(&ndev->dev, "%s: sh_eth_dev_init failed.\n", __func__); + netdev_err(ndev, "%s: sh_eth_dev_init failed.\n", __func__); return ret; } @@ -2004,7 +2016,7 @@ static int sh_eth_open(struct net_device *ndev) ret = request_irq(ndev->irq, sh_eth_interrupt, mdp->cd->irq_flags, ndev->name, ndev); if (ret) { - dev_err(&ndev->dev, "Can not assign IRQ number\n"); + netdev_err(ndev, "Can not assign IRQ number\n"); goto out_napi_off; } @@ -2042,10 +2054,9 @@ static void sh_eth_tx_timeout(struct net_device *ndev) netif_stop_queue(ndev); - if (netif_msg_timer(mdp)) { - dev_err(&ndev->dev, "%s: transmit timed out, status %8.8x, resetting...\n", - ndev->name, (int)sh_eth_read(ndev, EESR)); - } + netif_err(mdp, timer, ndev, + "transmit timed out, status %8.8x, resetting...\n", + (int)sh_eth_read(ndev, EESR)); /* tx_errors count up */ ndev->stats.tx_errors++; @@ -2080,8 +2091,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev) spin_lock_irqsave(&mdp->lock, flags); if ((mdp->cur_tx - mdp->dirty_tx) >= (mdp->num_tx_ring - 4)) 
{ if (!sh_eth_txfree(ndev)) { - if (netif_msg_tx_queued(mdp)) - dev_warn(&ndev->dev, "TxFD exhausted.\n"); + netif_warn(mdp, tx_queued, ndev, "TxFD exhausted.\n"); netif_stop_queue(ndev); spin_unlock_irqrestore(&mdp->lock, flags); return NETDEV_TX_BUSY; @@ -2098,8 +2108,8 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev) skb->len + 2); txdesc->addr = dma_map_single(&ndev->dev, skb->data, skb->len, DMA_TO_DEVICE); - if (skb->len < ETHERSMALL) - txdesc->buffer_length = ETHERSMALL; + if (skb->len < ETH_ZLEN) + txdesc->buffer_length = ETH_ZLEN; else txdesc->buffer_length = skb->len; @@ -2251,7 +2261,7 @@ static int sh_eth_tsu_busy(struct net_device *ndev) udelay(10); timeout--; if (timeout <= 0) { - dev_err(&ndev->dev, "%s: timeout\n", __func__); + netdev_err(ndev, "%s: timeout\n", __func__); return -ETIMEDOUT; } } @@ -2571,37 +2581,30 @@ static void sh_eth_tsu_init(struct sh_eth_private *mdp) } /* MDIO bus release function */ -static int sh_mdio_release(struct net_device *ndev) +static int sh_mdio_release(struct sh_eth_private *mdp) { - struct mii_bus *bus = dev_get_drvdata(&ndev->dev); - /* unregister mdio bus */ - mdiobus_unregister(bus); - - /* remove mdio bus info from net_device */ - dev_set_drvdata(&ndev->dev, NULL); + mdiobus_unregister(mdp->mii_bus); /* free bitbang info */ - free_mdio_bitbang(bus); + free_mdio_bitbang(mdp->mii_bus); return 0; } /* MDIO bus init function */ -static int sh_mdio_init(struct net_device *ndev, int id, +static int sh_mdio_init(struct sh_eth_private *mdp, struct sh_eth_plat_data *pd) { int ret, i; struct bb_info *bitbang; - struct sh_eth_private *mdp = netdev_priv(ndev); + struct platform_device *pdev = mdp->pdev; + struct device *dev = &mdp->pdev->dev; /* create bit control struct for PHY */ - bitbang = devm_kzalloc(&ndev->dev, sizeof(struct bb_info), - GFP_KERNEL); - if (!bitbang) { - ret = -ENOMEM; - goto out; - } + bitbang = devm_kzalloc(dev, sizeof(struct bb_info), GFP_KERNEL); + if (!bitbang) + return -ENOMEM; /* bitbang init */ bitbang->addr = mdp->addr + mdp->reg_offset[PIR]; @@ -2614,44 +2617,42 @@ static int sh_mdio_init(struct net_device *ndev, int id, /* MII controller setting */ mdp->mii_bus = alloc_mdio_bitbang(&bitbang->ctrl); - if (!mdp->mii_bus) { - ret = -ENOMEM; - goto out; - } + if (!mdp->mii_bus) + return -ENOMEM; /* Hook up MII support for ethtool */ mdp->mii_bus->name = "sh_mii"; - mdp->mii_bus->parent = &ndev->dev; + mdp->mii_bus->parent = dev; snprintf(mdp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", - mdp->pdev->name, id); + pdev->name, pdev->id); /* PHY IRQ */ - mdp->mii_bus->irq = devm_kzalloc(&ndev->dev, - sizeof(int) * PHY_MAX_ADDR, + mdp->mii_bus->irq = devm_kzalloc(dev, sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); if (!mdp->mii_bus->irq) { ret = -ENOMEM; goto out_free_bus; } - for (i = 0; i < PHY_MAX_ADDR; i++) - mdp->mii_bus->irq[i] = PHY_POLL; - if (pd->phy_irq > 0) - mdp->mii_bus->irq[pd->phy] = pd->phy_irq; + /* register MDIO bus */ + if (dev->of_node) { + ret = of_mdiobus_register(mdp->mii_bus, dev->of_node); + } else { + for (i = 0; i < PHY_MAX_ADDR; i++) + mdp->mii_bus->irq[i] = PHY_POLL; + if (pd->phy_irq > 0) + mdp->mii_bus->irq[pd->phy] = pd->phy_irq; + + ret = mdiobus_register(mdp->mii_bus); + } - /* register mdio bus */ - ret = mdiobus_register(mdp->mii_bus); if (ret) goto out_free_bus; - dev_set_drvdata(&ndev->dev, mdp->mii_bus); - return 0; out_free_bus: free_mdio_bitbang(mdp->mii_bus); - -out: return ret; } @@ -2676,7 +2677,6 @@ static const u16 *sh_eth_get_register_offset(int register_type) 
reg_offset = sh_eth_offset_fast_sh3_sh2; break; default: - pr_err("Unknown register type (%d)\n", register_type); break; } @@ -2710,6 +2710,48 @@ static const struct net_device_ops sh_eth_netdev_ops_tsu = { .ndo_change_mtu = eth_change_mtu, }; +#ifdef CONFIG_OF +static struct sh_eth_plat_data *sh_eth_parse_dt(struct device *dev) +{ + struct device_node *np = dev->of_node; + struct sh_eth_plat_data *pdata; + const char *mac_addr; + + pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); + if (!pdata) + return NULL; + + pdata->phy_interface = of_get_phy_mode(np); + + mac_addr = of_get_mac_address(np); + if (mac_addr) + memcpy(pdata->mac_addr, mac_addr, ETH_ALEN); + + pdata->no_ether_link = + of_property_read_bool(np, "renesas,no-ether-link"); + pdata->ether_link_active_low = + of_property_read_bool(np, "renesas,ether-link-active-low"); + + return pdata; +} + +static const struct of_device_id sh_eth_match_table[] = { + { .compatible = "renesas,gether-r8a7740", .data = &r8a7740_data }, + { .compatible = "renesas,ether-r8a7778", .data = &r8a777x_data }, + { .compatible = "renesas,ether-r8a7779", .data = &r8a777x_data }, + { .compatible = "renesas,ether-r8a7790", .data = &r8a779x_data }, + { .compatible = "renesas,ether-r8a7791", .data = &r8a779x_data }, + { .compatible = "renesas,ether-r7s72100", .data = &r7s72100_data }, + { } +}; +MODULE_DEVICE_TABLE(of, sh_eth_match_table); +#else +static inline struct sh_eth_plat_data *sh_eth_parse_dt(struct device *dev) +{ + return NULL; +} +#endif + static int sh_eth_drv_probe(struct platform_device *pdev) { int ret, devno = 0; @@ -2723,15 +2765,15 @@ static int sh_eth_drv_probe(struct platform_device *pdev) res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (unlikely(res == NULL)) { dev_err(&pdev->dev, "invalid resource\n"); - ret = -EINVAL; - goto out; + return -EINVAL; } ndev = alloc_etherdev(sizeof(struct sh_eth_private)); - if (!ndev) { - ret = -ENOMEM; - goto out; - } + if (!ndev) + return -ENOMEM; + + pm_runtime_enable(&pdev->dev); + pm_runtime_get_sync(&pdev->dev); /* The sh Ether-specific entries in the device structure. 
*/ ndev->base_addr = res->start; @@ -2760,9 +2802,9 @@ static int sh_eth_drv_probe(struct platform_device *pdev) spin_lock_init(&mdp->lock); mdp->pdev = pdev; - pm_runtime_enable(&pdev->dev); - pm_runtime_resume(&pdev->dev); + if (pdev->dev.of_node) + pd = sh_eth_parse_dt(&pdev->dev); if (!pd) { dev_err(&pdev->dev, "no platform data\n"); ret = -EINVAL; @@ -2778,8 +2820,22 @@ static int sh_eth_drv_probe(struct platform_device *pdev) mdp->ether_link_active_low = pd->ether_link_active_low; /* set cpu data */ - mdp->cd = (struct sh_eth_cpu_data *)id->driver_data; + if (id) { + mdp->cd = (struct sh_eth_cpu_data *)id->driver_data; + } else { + const struct of_device_id *match; + + match = of_match_device(of_match_ptr(sh_eth_match_table), + &pdev->dev); + mdp->cd = (struct sh_eth_cpu_data *)match->data; + } mdp->reg_offset = sh_eth_get_register_offset(mdp->cd->register_type); + if (!mdp->reg_offset) { + dev_err(&pdev->dev, "Unknown register type (%d)\n", + mdp->cd->register_type); + ret = -EINVAL; + goto out_release; + } sh_eth_set_default_cpu_data(mdp->cd); /* set function */ @@ -2825,6 +2881,13 @@ static int sh_eth_drv_probe(struct platform_device *pdev) } } + /* MDIO bus init */ + ret = sh_mdio_init(mdp, pd); + if (ret) { + dev_err(&ndev->dev, "failed to initialise MDIO\n"); + goto out_release; + } + netif_napi_add(ndev, &mdp->napi, sh_eth_poll, 64); /* network device register */ @@ -2832,31 +2895,26 @@ static int sh_eth_drv_probe(struct platform_device *pdev) if (ret) goto out_napi_del; - /* mdio bus init */ - ret = sh_mdio_init(ndev, pdev->id, pd); - if (ret) - goto out_unregister; - /* print device information */ - pr_info("Base address at 0x%x, %pM, IRQ %d.\n", - (u32)ndev->base_addr, ndev->dev_addr, ndev->irq); + netdev_info(ndev, "Base address at 0x%x, %pM, IRQ %d.\n", + (u32)ndev->base_addr, ndev->dev_addr, ndev->irq); + pm_runtime_put(&pdev->dev); platform_set_drvdata(pdev, ndev); return ret; -out_unregister: - unregister_netdev(ndev); - out_napi_del: netif_napi_del(&mdp->napi); + sh_mdio_release(mdp); out_release: /* net_dev free */ if (ndev) free_netdev(ndev); -out: + pm_runtime_put(&pdev->dev); + pm_runtime_disable(&pdev->dev); return ret; } @@ -2865,9 +2923,9 @@ static int sh_eth_drv_remove(struct platform_device *pdev) struct net_device *ndev = platform_get_drvdata(pdev); struct sh_eth_private *mdp = netdev_priv(ndev); - sh_mdio_release(ndev); unregister_netdev(ndev); netif_napi_del(&mdp->napi); + sh_mdio_release(mdp); pm_runtime_disable(&pdev->dev); free_netdev(ndev); @@ -2920,6 +2978,7 @@ static struct platform_driver sh_eth_driver = { .driver = { .name = CARDNAME, .pm = SH_ETH_PM_OPS, + .of_match_table = of_match_ptr(sh_eth_match_table), }, }; diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h index 6075915b88ec..d55e37cd5fec 100644 --- a/drivers/net/ethernet/renesas/sh_eth.h +++ b/drivers/net/ethernet/renesas/sh_eth.h @@ -27,8 +27,7 @@ #define RX_RING_MIN 64 #define TX_RING_MAX 1024 #define RX_RING_MAX 1024 -#define ETHERSMALL 60 -#define PKT_BUF_SZ 1538 +#define PKT_BUF_SZ 1538 #define SH_ETH_TSU_TIMEOUT_MS 500 #define SH_ETH_TSU_CAM_ENTRIES 32 diff --git a/drivers/net/ethernet/samsung/Kconfig b/drivers/net/ethernet/samsung/Kconfig new file mode 100644 index 000000000000..7902341f2623 --- /dev/null +++ b/drivers/net/ethernet/samsung/Kconfig @@ -0,0 +1,16 @@ +# +# Samsung Ethernet device configuration +# + +config NET_VENDOR_SAMSUNG + bool "Samsung Ethernet device" + default y + ---help--- + This is the driver for the SXGBE 10G Ethernet 
IP block found on Samsung + platforms. + +if NET_VENDOR_SAMSUNG + +source "drivers/net/ethernet/samsung/sxgbe/Kconfig" + +endif # NET_VENDOR_SAMSUNG diff --git a/drivers/net/ethernet/samsung/Makefile b/drivers/net/ethernet/samsung/Makefile new file mode 100644 index 000000000000..1773c29b8d76 --- /dev/null +++ b/drivers/net/ethernet/samsung/Makefile @@ -0,0 +1,5 @@ +# +# Makefile for the Samsung Ethernet device drivers. +# + +obj-$(CONFIG_SXGBE_ETH) += sxgbe/ diff --git a/drivers/net/ethernet/samsung/sxgbe/Kconfig b/drivers/net/ethernet/samsung/sxgbe/Kconfig new file mode 100644 index 000000000000..d79288c51d0a --- /dev/null +++ b/drivers/net/ethernet/samsung/sxgbe/Kconfig @@ -0,0 +1,9 @@ +config SXGBE_ETH + tristate "Samsung 10G/2.5G/1G SXGBE Ethernet driver" + depends on HAS_IOMEM && HAS_DMA + select PHYLIB + select CRC32 + select PTP_1588_CLOCK + ---help--- + This is the driver for the SXGBE 10G Ethernet IP block found on Samsung + platforms. diff --git a/drivers/net/ethernet/samsung/sxgbe/Makefile b/drivers/net/ethernet/samsung/sxgbe/Makefile new file mode 100644 index 000000000000..dcc80b9d4370 --- /dev/null +++ b/drivers/net/ethernet/samsung/sxgbe/Makefile @@ -0,0 +1,4 @@ +obj-$(CONFIG_SXGBE_ETH) += samsung-sxgbe.o +samsung-sxgbe-objs:= sxgbe_platform.o sxgbe_main.o sxgbe_desc.o \ + sxgbe_dma.o sxgbe_core.o sxgbe_mtl.o sxgbe_mdio.o \ + sxgbe_ethtool.o sxgbe_xpcs.o $(samsung-sxgbe-y) diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h b/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h new file mode 100644 index 000000000000..6203c7d8550f --- /dev/null +++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h @@ -0,0 +1,535 @@ +/* 10G controller driver for Samsung SoCs + * + * Copyright (C) 2013 Samsung Electronics Co., Ltd. + * http://www.samsung.com + * + * Author: Siva Reddy Kallam <siva.kallam@samsung.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __SXGBE_COMMON_H__ +#define __SXGBE_COMMON_H__ + +/* forward references */ +struct sxgbe_desc_ops; +struct sxgbe_dma_ops; +struct sxgbe_mtl_ops; + +#define SXGBE_RESOURCE_NAME "sam_sxgbeeth" +#define DRV_MODULE_VERSION "November_2013" + +/* MAX HW feature words */ +#define SXGBE_HW_WORDS 3 + +#define SXGBE_RX_COE_NONE 0 + +/* CSR Frequency Access Defines */ +#define SXGBE_CSR_F_150M 150000000 +#define SXGBE_CSR_F_250M 250000000 +#define SXGBE_CSR_F_300M 300000000 +#define SXGBE_CSR_F_350M 350000000 +#define SXGBE_CSR_F_400M 400000000 +#define SXGBE_CSR_F_500M 500000000 + +/* pause time */ +#define SXGBE_PAUSE_TIME 0x200 + +/* tx queues */ +#define SXGBE_TX_QUEUES 8 +#define SXGBE_RX_QUEUES 16 + +/* Calculated based on how much time it takes to fill 256KB of Rx memory + * at 10Gb speed with a 156MHz clock rate, set a little less than + * the actual value.
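+ * + * An illustrative back-of-the-envelope check (editor's assumption: the + * watchdog unit is 256 CSR clock cycles, as in similar Synopsys-derived + * MACs): 256KB = 2097152 bits takes ~210us to arrive at 10Gb/s, and one + * unit is 256 / 156MHz ~= 1.64us, so ~210us / 1.64us ~= 128 units = 0x80; + * the chosen 0x70 (112 units, ~184us) is indeed a little less.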
+ */ +#define SXGBE_MAX_DMA_RIWT 0x70 +#define SXGBE_MIN_DMA_RIWT 0x01 + +/* Tx coalesce parameters */ +#define SXGBE_COAL_TX_TIMER 40000 +#define SXGBE_MAX_COAL_TX_TICK 100000 +#define SXGBE_TX_MAX_FRAMES 512 +#define SXGBE_TX_FRAMES 128 + +/* SXGBE TX FIFO is 8K, Rx FIFO is 16K */ +#define BUF_SIZE_16KiB 16384 +#define BUF_SIZE_8KiB 8192 +#define BUF_SIZE_4KiB 4096 +#define BUF_SIZE_2KiB 2048 + +#define SXGBE_DEFAULT_LIT_LS 0x3E8 +#define SXGBE_DEFAULT_TWT_LS 0x0 + +/* Flow Control defines */ +#define SXGBE_FLOW_OFF 0 +#define SXGBE_FLOW_RX 1 +#define SXGBE_FLOW_TX 2 +#define SXGBE_FLOW_AUTO (SXGBE_FLOW_TX | SXGBE_FLOW_RX) + +#define SF_DMA_MODE 1 /* DMA STORE-AND-FORWARD Operation Mode */ + +/* errors */ +#define RX_GMII_ERR 0x01 +#define RX_WATCHDOG_ERR 0x02 +#define RX_CRC_ERR 0x03 +#define RX_GAINT_ERR 0x04 +#define RX_IP_HDR_ERR 0x05 +#define RX_PAYLOAD_ERR 0x06 +#define RX_OVERFLOW_ERR 0x07 + +/* pkt type */ +#define RX_LEN_PKT 0x00 +#define RX_MACCTL_PKT 0x01 +#define RX_DCBCTL_PKT 0x02 +#define RX_ARP_PKT 0x03 +#define RX_OAM_PKT 0x04 +#define RX_UNTAG_PKT 0x05 +#define RX_OTHER_PKT 0x07 +#define RX_SVLAN_PKT 0x08 +#define RX_CVLAN_PKT 0x09 +#define RX_DVLAN_OCVLAN_ICVLAN_PKT 0x0A +#define RX_DVLAN_OSVLAN_ISVLAN_PKT 0x0B +#define RX_DVLAN_OSVLAN_ICVLAN_PKT 0x0C +#define RX_DVLAN_OCVLAN_ISVLAN_PKT 0x0D + +#define RX_NOT_IP_PKT 0x00 +#define RX_IPV4_TCP_PKT 0x01 +#define RX_IPV4_UDP_PKT 0x02 +#define RX_IPV4_ICMP_PKT 0x03 +#define RX_IPV4_UNKNOWN_PKT 0x07 +#define RX_IPV6_TCP_PKT 0x09 +#define RX_IPV6_UDP_PKT 0x0A +#define RX_IPV6_ICMP_PKT 0x0B +#define RX_IPV6_UNKNOWN_PKT 0x0F + +#define RX_NO_PTP 0x00 +#define RX_PTP_SYNC 0x01 +#define RX_PTP_FOLLOW_UP 0x02 +#define RX_PTP_DELAY_REQ 0x03 +#define RX_PTP_DELAY_RESP 0x04 +#define RX_PTP_PDELAY_REQ 0x05 +#define RX_PTP_PDELAY_RESP 0x06 +#define RX_PTP_PDELAY_FOLLOW_UP 0x07 +#define RX_PTP_ANNOUNCE 0x08 +#define RX_PTP_MGMT 0x09 +#define RX_PTP_SIGNAL 0x0A +#define RX_PTP_RESV_MSG 0x0F + +/* EEE-LPI mode flags*/ +#define TX_ENTRY_LPI_MODE 0x10 +#define TX_EXIT_LPI_MODE 0x20 +#define RX_ENTRY_LPI_MODE 0x40 +#define RX_EXIT_LPI_MODE 0x80 + +/* EEE-LPI Interrupt status flag */ +#define LPI_INT_STATUS BIT(5) + +/* EEE-LPI Default timer values */ +#define LPI_LINK_STATUS_TIMER 0x3E8 +#define LPI_MAC_WAIT_TIMER 0x00 + +/* EEE-LPI Control and status definitions */ +#define LPI_CTRL_STATUS_TXA BIT(19) +#define LPI_CTRL_STATUS_PLSDIS BIT(18) +#define LPI_CTRL_STATUS_PLS BIT(17) +#define LPI_CTRL_STATUS_LPIEN BIT(16) +#define LPI_CTRL_STATUS_TXRSTP BIT(11) +#define LPI_CTRL_STATUS_RXRSTP BIT(10) +#define LPI_CTRL_STATUS_RLPIST BIT(9) +#define LPI_CTRL_STATUS_TLPIST BIT(8) +#define LPI_CTRL_STATUS_RLPIEX BIT(3) +#define LPI_CTRL_STATUS_RLPIEN BIT(2) +#define LPI_CTRL_STATUS_TLPIEX BIT(1) +#define LPI_CTRL_STATUS_TLPIEN BIT(0) + +enum dma_irq_status { + tx_hard_error = BIT(0), + tx_bump_tc = BIT(1), + handle_tx = BIT(2), + rx_hard_error = BIT(3), + rx_bump_tc = BIT(4), + handle_rx = BIT(5), +}; + +#define NETIF_F_HW_VLAN_ALL (NETIF_F_HW_VLAN_CTAG_RX | \ + NETIF_F_HW_VLAN_STAG_RX | \ + NETIF_F_HW_VLAN_CTAG_TX | \ + NETIF_F_HW_VLAN_STAG_TX | \ + NETIF_F_HW_VLAN_CTAG_FILTER | \ + NETIF_F_HW_VLAN_STAG_FILTER) + +/* MMC control defines */ +#define SXGBE_MMC_CTRL_CNT_FRZ 0x00000008 + +/* SXGBE HW ADDR regs */ +#define SXGBE_ADDR_HIGH(reg) (((reg > 15) ? 0x00000800 : 0x00000040) + \ + (reg * 8)) +#define SXGBE_ADDR_LOW(reg) (((reg > 15) ? 
0x00000804 : 0x00000044) + \ + (reg * 8)) +#define SXGBE_MAX_PERFECT_ADDRESSES 32 /* Maximum unicast perfect filtering */ +#define SXGBE_FRAME_FILTER 0x00000004 /* Frame Filter */ + +/* SXGBE Frame Filter defines */ +#define SXGBE_FRAME_FILTER_PR 0x00000001 /* Promiscuous Mode */ +#define SXGBE_FRAME_FILTER_HUC 0x00000002 /* Hash Unicast */ +#define SXGBE_FRAME_FILTER_HMC 0x00000004 /* Hash Multicast */ +#define SXGBE_FRAME_FILTER_DAIF 0x00000008 /* DA Inverse Filtering */ +#define SXGBE_FRAME_FILTER_PM 0x00000010 /* Pass all multicast */ +#define SXGBE_FRAME_FILTER_DBF 0x00000020 /* Disable Broadcast frames */ +#define SXGBE_FRAME_FILTER_SAIF 0x00000100 /* Inverse Filtering */ +#define SXGBE_FRAME_FILTER_SAF 0x00000200 /* Source Address Filter */ +#define SXGBE_FRAME_FILTER_HPF 0x00000400 /* Hash or perfect Filter */ +#define SXGBE_FRAME_FILTER_RA 0x80000000 /* Receive all mode */ + +#define SXGBE_HASH_TABLE_SIZE 64 +#define SXGBE_HASH_HIGH 0x00000008 /* Multicast Hash Table High */ +#define SXGBE_HASH_LOW 0x0000000c /* Multicast Hash Table Low */ + +#define SXGBE_HI_REG_AE 0x80000000 + +/* Minimum and maximum MTU */ +#define MIN_MTU 68 +#define MAX_MTU 9000 + +#define SXGBE_FOR_EACH_QUEUE(max_queues, queue_num) \ + for (queue_num = 0; queue_num < max_queues; queue_num++) + +#define DRV_VERSION "1.0.0" + +#define SXGBE_MAX_RX_CHANNELS 16 +#define SXGBE_MAX_TX_CHANNELS 16 + +#define START_MAC_REG_OFFSET 0x0000 +#define MAX_MAC_REG_OFFSET 0x0DFC +#define START_MTL_REG_OFFSET 0x1000 +#define MAX_MTL_REG_OFFSET 0x18FC +#define START_DMA_REG_OFFSET 0x3000 +#define MAX_DMA_REG_OFFSET 0x38FC + +#define REG_SPACE_SIZE 0x2000 + +/* sxgbe statistics counters */ +struct sxgbe_extra_stats { + /* TX/RX IRQ events */ + unsigned long tx_underflow_irq; + unsigned long tx_process_stopped_irq; + unsigned long tx_ctxt_desc_err; + unsigned long tx_threshold; + unsigned long rx_threshold; + unsigned long tx_pkt_n; + unsigned long rx_pkt_n; + unsigned long normal_irq_n; + unsigned long tx_normal_irq_n; + unsigned long rx_normal_irq_n; + unsigned long napi_poll; + unsigned long tx_clean; + unsigned long tx_reset_ic_bit; + unsigned long rx_process_stopped_irq; + unsigned long rx_underflow_irq; + + /* Bus access errors */ + unsigned long fatal_bus_error_irq; + unsigned long tx_read_transfer_err; + unsigned long tx_write_transfer_err; + unsigned long tx_desc_access_err; + unsigned long tx_buffer_access_err; + unsigned long tx_data_transfer_err; + unsigned long rx_read_transfer_err; + unsigned long rx_write_transfer_err; + unsigned long rx_desc_access_err; + unsigned long rx_buffer_access_err; + unsigned long rx_data_transfer_err; + + /* EEE-LPI stats */ + unsigned long tx_lpi_entry_n; + unsigned long tx_lpi_exit_n; + unsigned long rx_lpi_entry_n; + unsigned long rx_lpi_exit_n; + unsigned long eee_wakeup_error_n; + + /* RX specific */ + /* L2 error */ + unsigned long rx_code_gmii_err; + unsigned long rx_watchdog_err; + unsigned long rx_crc_err; + unsigned long rx_gaint_pkt_err; + unsigned long ip_hdr_err; + unsigned long ip_payload_err; + unsigned long overflow_error; + + /* L2 Pkt type */ + unsigned long len_pkt; + unsigned long mac_ctl_pkt; + unsigned long dcb_ctl_pkt; + unsigned long arp_pkt; + unsigned long oam_pkt; + unsigned long untag_okt; + unsigned long other_pkt; + unsigned long svlan_tag_pkt; + unsigned long cvlan_tag_pkt; + unsigned long dvlan_ocvlan_icvlan_pkt; + unsigned long dvlan_osvlan_isvlan_pkt; + unsigned long dvlan_osvlan_icvlan_pkt; + unsigned long dvan_ocvlan_icvlan_pkt; + + /* L3/L4 Pkt type 
*/ + unsigned long not_ip_pkt; + unsigned long ip4_tcp_pkt; + unsigned long ip4_udp_pkt; + unsigned long ip4_icmp_pkt; + unsigned long ip4_unknown_pkt; + unsigned long ip6_tcp_pkt; + unsigned long ip6_udp_pkt; + unsigned long ip6_icmp_pkt; + unsigned long ip6_unknown_pkt; + + /* Filter specific */ + unsigned long vlan_filter_match; + unsigned long sa_filter_fail; + unsigned long da_filter_fail; + unsigned long hash_filter_pass; + unsigned long l3_filter_match; + unsigned long l4_filter_match; + + /* RX context specific */ + unsigned long timestamp_dropped; + unsigned long rx_msg_type_no_ptp; + unsigned long rx_ptp_type_sync; + unsigned long rx_ptp_type_follow_up; + unsigned long rx_ptp_type_delay_req; + unsigned long rx_ptp_type_delay_resp; + unsigned long rx_ptp_type_pdelay_req; + unsigned long rx_ptp_type_pdelay_resp; + unsigned long rx_ptp_type_pdelay_follow_up; + unsigned long rx_ptp_announce; + unsigned long rx_ptp_mgmt; + unsigned long rx_ptp_signal; + unsigned long rx_ptp_resv_msg_type; +}; + +struct mac_link { + int port; + int duplex; + int speed; +}; + +struct mii_regs { + unsigned int addr; /* MII Address */ + unsigned int data; /* MII Data */ +}; + +struct sxgbe_core_ops { + /* MAC core initialization */ + void (*core_init)(void __iomem *ioaddr); + /* Dump MAC registers */ + void (*dump_regs)(void __iomem *ioaddr); + /* Handle extra events on specific interrupts hw dependent */ + int (*host_irq_status)(void __iomem *ioaddr, + struct sxgbe_extra_stats *x); + /* Set power management mode (e.g. magic frame) */ + void (*pmt)(void __iomem *ioaddr, unsigned long mode); + /* Set/Get Unicast MAC addresses */ + void (*set_umac_addr)(void __iomem *ioaddr, unsigned char *addr, + unsigned int reg_n); + void (*get_umac_addr)(void __iomem *ioaddr, unsigned char *addr, + unsigned int reg_n); + void (*enable_rx)(void __iomem *ioaddr, bool enable); + void (*enable_tx)(void __iomem *ioaddr, bool enable); + + /* controller version specific operations */ + int (*get_controller_version)(void __iomem *ioaddr); + + /* If supported then get the optional core features */ + unsigned int (*get_hw_feature)(void __iomem *ioaddr, + unsigned char feature_index); + /* adjust SXGBE speed */ + void (*set_speed)(void __iomem *ioaddr, unsigned char speed); + + /* EEE-LPI specific operations */ + void (*set_eee_mode)(void __iomem *ioaddr); + void (*reset_eee_mode)(void __iomem *ioaddr); + void (*set_eee_timer)(void __iomem *ioaddr, const int ls, + const int tw); + void (*set_eee_pls)(void __iomem *ioaddr, const int link); + + /* Enable disable checksum offload operations */ + void (*enable_rx_csum)(void __iomem *ioaddr); + void (*disable_rx_csum)(void __iomem *ioaddr); +}; + +const struct sxgbe_core_ops *sxgbe_get_core_ops(void); + +struct sxgbe_ops { + const struct sxgbe_core_ops *mac; + const struct sxgbe_desc_ops *desc; + const struct sxgbe_dma_ops *dma; + const struct sxgbe_mtl_ops *mtl; + struct mii_regs mii; /* MII register Addresses */ + struct mac_link link; + unsigned int ctrl_uid; + unsigned int ctrl_id; +}; + +/* SXGBE private data structures */ +struct sxgbe_tx_queue { + unsigned int irq_no; + struct sxgbe_priv_data *priv_ptr; + struct sxgbe_tx_norm_desc *dma_tx; + dma_addr_t dma_tx_phy; + dma_addr_t *tx_skbuff_dma; + struct sk_buff **tx_skbuff; + struct timer_list txtimer; + spinlock_t tx_lock; /* lock for tx queues */ + unsigned int cur_tx; + unsigned int dirty_tx; + u32 tx_count_frames; + u32 tx_coal_frames; + u32 tx_coal_timer; + int hwts_tx_en; + u16 prev_mss; + u8 queue_no; +}; + +struct 
sxgbe_rx_queue {
+	struct sxgbe_priv_data *priv_ptr;
+	struct sxgbe_rx_norm_desc *dma_rx;
+	struct sk_buff **rx_skbuff;
+	unsigned int cur_rx;
+	unsigned int dirty_rx;
+	unsigned int irq_no;
+	u32 rx_riwt;
+	dma_addr_t *rx_skbuff_dma;
+	dma_addr_t dma_rx_phy;
+	u8 queue_no;
+};
+
+/* SXGBE HW capabilities */
+struct sxgbe_hw_features {
+	/****** CAP [0] *******/
+	unsigned int pmt_remote_wake_up;
+	unsigned int pmt_magic_frame;
+	/* IEEE 1588-2008 */
+	unsigned int atime_stamp;
+
+	unsigned int eee;
+
+	unsigned int tx_csum_offload;
+	unsigned int rx_csum_offload;
+	unsigned int multi_macaddr;
+	unsigned int tstamp_srcselect;
+	unsigned int sa_vlan_insert;
+
+	/****** CAP [1] *******/
+	unsigned int rxfifo_size;
+	unsigned int txfifo_size;
+	unsigned int atstmap_hword;
+	unsigned int dcb_enable;
+	unsigned int splithead_enable;
+	unsigned int tcpseg_offload;
+	unsigned int debug_mem;
+	unsigned int rss_enable;
+	unsigned int hash_tsize;
+	unsigned int l3l4_filer_size;
+
+	/* These values are in bytes, as given in the HW features
+	 * section of the SXGBE databook
+	 */
+	unsigned int rx_mtl_qsize;
+	unsigned int tx_mtl_qsize;
+
+	/****** CAP [2] *******/
+	/* TX and RX number of channels */
+	unsigned int rx_mtl_queues;
+	unsigned int tx_mtl_queues;
+	unsigned int rx_dma_channels;
+	unsigned int tx_dma_channels;
+	unsigned int pps_output_count;
+	unsigned int aux_input_count;
+};
+
+struct sxgbe_priv_data {
+	/* DMA descriptors */
+	struct sxgbe_tx_queue *txq[SXGBE_TX_QUEUES];
+	struct sxgbe_rx_queue *rxq[SXGBE_RX_QUEUES];
+	u8 cur_rx_qnum;
+
+	unsigned int dma_tx_size;
+	unsigned int dma_rx_size;
+	unsigned int dma_buf_sz;
+	u32 rx_riwt;
+
+	struct napi_struct napi;
+
+	void __iomem *ioaddr;
+	struct net_device *dev;
+	struct device *device;
+	struct sxgbe_ops *hw;	/* sxgbe specific ops */
+	int no_csum_insertion;
+	int irq;
+	int rxcsum_insertion;
+	spinlock_t stats_lock;	/* lock for tx/rx statistics */
+
+	struct phy_device *phydev;
+	int oldlink;
+	int speed;
+	int oldduplex;
+	struct mii_bus *mii;
+	int mii_irq[PHY_MAX_ADDR];
+	u8 rx_pause;
+	u8 tx_pause;
+
+	struct sxgbe_extra_stats xstats;
+	struct sxgbe_plat_data *plat;
+	struct sxgbe_hw_features hw_cap;
+
+	u32 msg_enable;
+
+	struct clk *sxgbe_clk;
+	int clk_csr;
+	unsigned int mode;
+	unsigned int default_addend;
+
+	/* advanced time stamp support */
+	u32 adv_ts;
+	int use_riwt;
+	struct ptp_clock *ptp_clock;
+
+	/* tc control */
+	int tx_tc;
+	int rx_tc;
+	/* EEE-LPI specific members */
+	struct timer_list eee_ctrl_timer;
+	bool tx_path_in_lpi_mode;
+	int lpi_irq;
+	int eee_enabled;
+	int eee_active;
+	int tx_lpi_timer;
+};
+
+/* Function prototypes */
+struct sxgbe_priv_data *sxgbe_drv_probe(struct device *device,
+					struct sxgbe_plat_data *plat_dat,
+					void __iomem *addr);
+int sxgbe_drv_remove(struct net_device *ndev);
+void sxgbe_set_ethtool_ops(struct net_device *netdev);
+int sxgbe_mdio_unregister(struct net_device *ndev);
+int sxgbe_mdio_register(struct net_device *ndev);
+int sxgbe_register_platform(void);
+void sxgbe_unregister_platform(void);
+
+#ifdef CONFIG_PM
+int sxgbe_suspend(struct net_device *ndev);
+int sxgbe_resume(struct net_device *ndev);
+int sxgbe_freeze(struct net_device *ndev);
+int sxgbe_restore(struct net_device *ndev);
+#endif /* CONFIG_PM */
+
+const struct sxgbe_mtl_ops *sxgbe_get_mtl_ops(void);
+
+void sxgbe_disable_eee_mode(struct sxgbe_priv_data * const priv);
+bool sxgbe_eee_init(struct sxgbe_priv_data * const priv);
+#endif /* __SXGBE_COMMON_H__ */
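
As an aside on how the pieces above fit together: the accessors declared at the end of this header (together with sxgbe_get_desc_ops()/sxgbe_get_dma_ops() from the sibling headers) are meant to be gathered once at probe time into a struct sxgbe_ops. A minimal hypothetical sketch, not part of the patch; the helper name and error handling here are invented:

	static int example_hw_init(struct sxgbe_priv_data *priv)
	{
		struct sxgbe_ops *ops;

		ops = devm_kzalloc(priv->device, sizeof(*ops), GFP_KERNEL);
		if (!ops)
			return -ENOMEM;

		ops->mac  = sxgbe_get_core_ops();
		ops->desc = sxgbe_get_desc_ops();
		ops->dma  = sxgbe_get_dma_ops();
		ops->mtl  = sxgbe_get_mtl_ops();
		priv->hw  = ops;

		/* cache the MAC version, e.g. for debug/ethtool output */
		priv->hw->ctrl_id =
			priv->hw->mac->get_controller_version(priv->ioaddr);

		return 0;
	}

diff --git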
a/drivers/net/ethernet/samsung/sxgbe/sxgbe_core.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_core.c new file mode 100644 index 000000000000..c4da7a2b002a --- /dev/null +++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_core.c @@ -0,0 +1,262 @@ +/* 10G controller driver for Samsung SoCs + * + * Copyright (C) 2013 Samsung Electronics Co., Ltd. + * http://www.samsung.com + * + * Author: Siva Reddy Kallam <siva.kallam@samsung.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/export.h> +#include <linux/io.h> +#include <linux/netdevice.h> +#include <linux/phy.h> + +#include "sxgbe_common.h" +#include "sxgbe_reg.h" + +/* MAC core initialization */ +static void sxgbe_core_init(void __iomem *ioaddr) +{ + u32 regval; + + /* TX configuration */ + regval = readl(ioaddr + SXGBE_CORE_TX_CONFIG_REG); + /* Other configurable parameters IFP, IPG, ISR, ISM + * needs to be set if needed + */ + regval |= SXGBE_TX_JABBER_DISABLE; + writel(regval, ioaddr + SXGBE_CORE_TX_CONFIG_REG); + + /* RX configuration */ + regval = readl(ioaddr + SXGBE_CORE_RX_CONFIG_REG); + /* Other configurable parameters CST, SPEN, USP, GPSLCE + * WD, LM, S2KP, HDSMS, GPSL, ELEN, ARPEN needs to be + * set if needed + */ + regval |= SXGBE_RX_JUMBPKT_ENABLE | SXGBE_RX_ACS_ENABLE; + writel(regval, ioaddr + SXGBE_CORE_RX_CONFIG_REG); +} + +/* Dump MAC registers */ +static void sxgbe_core_dump_regs(void __iomem *ioaddr) +{ +} + +static int sxgbe_get_lpi_status(void __iomem *ioaddr, const u32 irq_status) +{ + int status = 0; + int lpi_status; + + /* Reading this register shall clear all the LPI status bits */ + lpi_status = readl(ioaddr + SXGBE_CORE_LPI_CTRL_STATUS); + + if (lpi_status & LPI_CTRL_STATUS_TLPIEN) + status |= TX_ENTRY_LPI_MODE; + if (lpi_status & LPI_CTRL_STATUS_TLPIEX) + status |= TX_EXIT_LPI_MODE; + if (lpi_status & LPI_CTRL_STATUS_RLPIEN) + status |= RX_ENTRY_LPI_MODE; + if (lpi_status & LPI_CTRL_STATUS_RLPIEX) + status |= RX_EXIT_LPI_MODE; + + return status; +} + +/* Handle extra events on specific interrupts hw dependent */ +static int sxgbe_core_host_irq_status(void __iomem *ioaddr, + struct sxgbe_extra_stats *x) +{ + int irq_status, status = 0; + + irq_status = readl(ioaddr + SXGBE_CORE_INT_STATUS_REG); + + if (unlikely(irq_status & LPI_INT_STATUS)) + status |= sxgbe_get_lpi_status(ioaddr, irq_status); + + return status; +} + +/* Set power management mode (e.g. 
magic frame) */ +static void sxgbe_core_pmt(void __iomem *ioaddr, unsigned long mode) +{ +} + +/* Set/Get Unicast MAC addresses */ +static void sxgbe_core_set_umac_addr(void __iomem *ioaddr, unsigned char *addr, + unsigned int reg_n) +{ + u32 high_word, low_word; + + high_word = (addr[5] << 8) | (addr[4]); + low_word = (addr[3] << 24) | (addr[2] << 16) | + (addr[1] << 8) | (addr[0]); + writel(high_word, ioaddr + SXGBE_CORE_ADD_HIGHOFFSET(reg_n)); + writel(low_word, ioaddr + SXGBE_CORE_ADD_LOWOFFSET(reg_n)); +} + +static void sxgbe_core_get_umac_addr(void __iomem *ioaddr, unsigned char *addr, + unsigned int reg_n) +{ + u32 high_word, low_word; + + high_word = readl(ioaddr + SXGBE_CORE_ADD_HIGHOFFSET(reg_n)); + low_word = readl(ioaddr + SXGBE_CORE_ADD_LOWOFFSET(reg_n)); + + /* extract and assign address */ + addr[5] = (high_word & 0x0000FF00) >> 8; + addr[4] = (high_word & 0x000000FF); + addr[3] = (low_word & 0xFF000000) >> 24; + addr[2] = (low_word & 0x00FF0000) >> 16; + addr[1] = (low_word & 0x0000FF00) >> 8; + addr[0] = (low_word & 0x000000FF); +} + +static void sxgbe_enable_tx(void __iomem *ioaddr, bool enable) +{ + u32 tx_config; + + tx_config = readl(ioaddr + SXGBE_CORE_TX_CONFIG_REG); + tx_config &= ~SXGBE_TX_ENABLE; + + if (enable) + tx_config |= SXGBE_TX_ENABLE; + writel(tx_config, ioaddr + SXGBE_CORE_TX_CONFIG_REG); +} + +static void sxgbe_enable_rx(void __iomem *ioaddr, bool enable) +{ + u32 rx_config; + + rx_config = readl(ioaddr + SXGBE_CORE_RX_CONFIG_REG); + rx_config &= ~SXGBE_RX_ENABLE; + + if (enable) + rx_config |= SXGBE_RX_ENABLE; + writel(rx_config, ioaddr + SXGBE_CORE_RX_CONFIG_REG); +} + +static int sxgbe_get_controller_version(void __iomem *ioaddr) +{ + return readl(ioaddr + SXGBE_CORE_VERSION_REG); +} + +/* If supported then get the optional core features */ +static unsigned int sxgbe_get_hw_feature(void __iomem *ioaddr, + unsigned char feature_index) +{ + return readl(ioaddr + (SXGBE_CORE_HW_FEA_REG(feature_index))); +} + +static void sxgbe_core_set_speed(void __iomem *ioaddr, unsigned char speed) +{ + u32 tx_cfg = readl(ioaddr + SXGBE_CORE_TX_CONFIG_REG); + + /* clear the speed bits */ + tx_cfg &= ~0x60000000; + tx_cfg |= (speed << SXGBE_SPEED_LSHIFT); + + /* set the speed */ + writel(tx_cfg, ioaddr + SXGBE_CORE_TX_CONFIG_REG); +} + +static void sxgbe_set_eee_mode(void __iomem *ioaddr) +{ + u32 ctrl; + + /* Enable the LPI mode for transmit path with Tx automate bit set. + * When Tx Automate bit is set, MAC internally handles the entry + * to LPI mode after all outstanding and pending packets are + * transmitted. 
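+ * (So with TXA set, software only has to toggle LPIEN in this
+ * register; the MAC itself times the actual LPI entry and exit.)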
+ */ + ctrl = readl(ioaddr + SXGBE_CORE_LPI_CTRL_STATUS); + ctrl |= LPI_CTRL_STATUS_LPIEN | LPI_CTRL_STATUS_TXA; + writel(ctrl, ioaddr + SXGBE_CORE_LPI_CTRL_STATUS); +} + +static void sxgbe_reset_eee_mode(void __iomem *ioaddr) +{ + u32 ctrl; + + ctrl = readl(ioaddr + SXGBE_CORE_LPI_CTRL_STATUS); + ctrl &= ~(LPI_CTRL_STATUS_LPIEN | LPI_CTRL_STATUS_TXA); + writel(ctrl, ioaddr + SXGBE_CORE_LPI_CTRL_STATUS); +} + +static void sxgbe_set_eee_pls(void __iomem *ioaddr, const int link) +{ + u32 ctrl; + + ctrl = readl(ioaddr + SXGBE_CORE_LPI_CTRL_STATUS); + + /* If the PHY link status is UP then set PLS */ + if (link) + ctrl |= LPI_CTRL_STATUS_PLS; + else + ctrl &= ~LPI_CTRL_STATUS_PLS; + + writel(ctrl, ioaddr + SXGBE_CORE_LPI_CTRL_STATUS); +} + +static void sxgbe_set_eee_timer(void __iomem *ioaddr, + const int ls, const int tw) +{ + int value = ((tw & 0xffff)) | ((ls & 0x7ff) << 16); + + /* Program the timers in the LPI timer control register: + * LS: minimum time (ms) for which the link + * status from PHY should be ok before transmitting + * the LPI pattern. + * TW: minimum time (us) for which the core waits + * after it has stopped transmitting the LPI pattern. + */ + writel(value, ioaddr + SXGBE_CORE_LPI_TIMER_CTRL); +} + +static void sxgbe_enable_rx_csum(void __iomem *ioaddr) +{ + u32 ctrl; + + ctrl = readl(ioaddr + SXGBE_CORE_RX_CONFIG_REG); + ctrl |= SXGBE_RX_CSUMOFFLOAD_ENABLE; + writel(ctrl, ioaddr + SXGBE_CORE_RX_CONFIG_REG); +} + +static void sxgbe_disable_rx_csum(void __iomem *ioaddr) +{ + u32 ctrl; + + ctrl = readl(ioaddr + SXGBE_CORE_RX_CONFIG_REG); + ctrl &= ~SXGBE_RX_CSUMOFFLOAD_ENABLE; + writel(ctrl, ioaddr + SXGBE_CORE_RX_CONFIG_REG); +} + +static const struct sxgbe_core_ops core_ops = { + .core_init = sxgbe_core_init, + .dump_regs = sxgbe_core_dump_regs, + .host_irq_status = sxgbe_core_host_irq_status, + .pmt = sxgbe_core_pmt, + .set_umac_addr = sxgbe_core_set_umac_addr, + .get_umac_addr = sxgbe_core_get_umac_addr, + .enable_rx = sxgbe_enable_rx, + .enable_tx = sxgbe_enable_tx, + .get_controller_version = sxgbe_get_controller_version, + .get_hw_feature = sxgbe_get_hw_feature, + .set_speed = sxgbe_core_set_speed, + .set_eee_mode = sxgbe_set_eee_mode, + .reset_eee_mode = sxgbe_reset_eee_mode, + .set_eee_timer = sxgbe_set_eee_timer, + .set_eee_pls = sxgbe_set_eee_pls, + .enable_rx_csum = sxgbe_enable_rx_csum, + .disable_rx_csum = sxgbe_disable_rx_csum, +}; + +const struct sxgbe_core_ops *sxgbe_get_core_ops(void) +{ + return &core_ops; +} diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c new file mode 100644 index 000000000000..e896dbbd2e15 --- /dev/null +++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c @@ -0,0 +1,515 @@ +/* 10G controller driver for Samsung SoCs + * + * Copyright (C) 2013 Samsung Electronics Co., Ltd. + * http://www.samsung.com + * + * Author: Siva Reddy Kallam <siva.kallam@samsung.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/bitops.h> +#include <linux/export.h> +#include <linux/io.h> +#include <linux/netdevice.h> +#include <linux/phy.h> + +#include "sxgbe_common.h" +#include "sxgbe_dma.h" +#include "sxgbe_desc.h" + +/* DMA TX descriptor ring initialization */ +static void sxgbe_init_tx_desc(struct sxgbe_tx_norm_desc *p) +{ + p->tdes23.tx_rd_des23.own_bit = 0; +} + +static void sxgbe_tx_desc_enable_tse(struct sxgbe_tx_norm_desc *p, u8 is_tse, + u32 total_hdr_len, u32 tcp_hdr_len, + u32 tcp_payload_len) +{ + p->tdes23.tx_rd_des23.tse_bit = is_tse; + p->tdes23.tx_rd_des23.buf1_size = total_hdr_len; + p->tdes23.tx_rd_des23.tcp_hdr_len = tcp_hdr_len / 4; + p->tdes23.tx_rd_des23.tx_pkt_len.tcp_payload_len = tcp_payload_len; +} + +/* Assign buffer lengths for descriptor */ +static void sxgbe_prepare_tx_desc(struct sxgbe_tx_norm_desc *p, u8 is_fd, + int buf1_len, int pkt_len, int cksum) +{ + p->tdes23.tx_rd_des23.first_desc = is_fd; + p->tdes23.tx_rd_des23.buf1_size = buf1_len; + + p->tdes23.tx_rd_des23.tx_pkt_len.cksum_pktlen.total_pkt_len = pkt_len; + + if (cksum) + p->tdes23.tx_rd_des23.tx_pkt_len.cksum_pktlen.cksum_ctl = cic_full; +} + +/* Set VLAN control information */ +static void sxgbe_tx_vlanctl_desc(struct sxgbe_tx_norm_desc *p, int vlan_ctl) +{ + p->tdes23.tx_rd_des23.vlan_tag_ctl = vlan_ctl; +} + +/* Set the owner of Normal descriptor */ +static void sxgbe_set_tx_owner(struct sxgbe_tx_norm_desc *p) +{ + p->tdes23.tx_rd_des23.own_bit = 1; +} + +/* Get the owner of Normal descriptor */ +static int sxgbe_get_tx_owner(struct sxgbe_tx_norm_desc *p) +{ + return p->tdes23.tx_rd_des23.own_bit; +} + +/* Invoked by the xmit function to close the tx descriptor */ +static void sxgbe_close_tx_desc(struct sxgbe_tx_norm_desc *p) +{ + p->tdes23.tx_rd_des23.last_desc = 1; + p->tdes23.tx_rd_des23.int_on_com = 1; +} + +/* Clean the tx descriptor as soon as the tx irq is received */ +static void sxgbe_release_tx_desc(struct sxgbe_tx_norm_desc *p) +{ + memset(p, 0, sizeof(*p)); +} + +/* Clear interrupt on tx frame completion. 
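+ * (Presumably cleared on most descriptors so that completion IRQs
+ * are coalesced per the driver's TX parameters, e.g. SXGBE_TX_FRAMES.)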
When this bit is + * set an interrupt happens as soon as the frame is transmitted + */ +static void sxgbe_clear_tx_ic(struct sxgbe_tx_norm_desc *p) +{ + p->tdes23.tx_rd_des23.int_on_com = 0; +} + +/* Last tx segment reports the transmit status */ +static int sxgbe_get_tx_ls(struct sxgbe_tx_norm_desc *p) +{ + return p->tdes23.tx_rd_des23.last_desc; +} + +/* Get the buffer size from the descriptor */ +static int sxgbe_get_tx_len(struct sxgbe_tx_norm_desc *p) +{ + return p->tdes23.tx_rd_des23.buf1_size; +} + +/* Set tx timestamp enable bit */ +static void sxgbe_tx_enable_tstamp(struct sxgbe_tx_norm_desc *p) +{ + p->tdes23.tx_rd_des23.timestmp_enable = 1; +} + +/* get tx timestamp status */ +static int sxgbe_get_tx_timestamp_status(struct sxgbe_tx_norm_desc *p) +{ + return p->tdes23.tx_rd_des23.timestmp_enable; +} + +/* TX Context Descripto Specific */ +static void sxgbe_tx_ctxt_desc_set_ctxt(struct sxgbe_tx_ctxt_desc *p) +{ + p->ctxt_bit = 1; +} + +/* Set the owner of TX context descriptor */ +static void sxgbe_tx_ctxt_desc_set_owner(struct sxgbe_tx_ctxt_desc *p) +{ + p->own_bit = 1; +} + +/* Get the owner of TX context descriptor */ +static int sxgbe_tx_ctxt_desc_get_owner(struct sxgbe_tx_ctxt_desc *p) +{ + return p->own_bit; +} + +/* Set TX mss in TX context Descriptor */ +static void sxgbe_tx_ctxt_desc_set_mss(struct sxgbe_tx_ctxt_desc *p, u16 mss) +{ + p->maxseg_size = mss; +} + +/* Get TX mss from TX context Descriptor */ +static int sxgbe_tx_ctxt_desc_get_mss(struct sxgbe_tx_ctxt_desc *p) +{ + return p->maxseg_size; +} + +/* Set TX tcmssv in TX context Descriptor */ +static void sxgbe_tx_ctxt_desc_set_tcmssv(struct sxgbe_tx_ctxt_desc *p) +{ + p->tcmssv = 1; +} + +/* Reset TX ostc in TX context Descriptor */ +static void sxgbe_tx_ctxt_desc_reset_ostc(struct sxgbe_tx_ctxt_desc *p) +{ + p->ostc = 0; +} + +/* Set IVLAN information */ +static void sxgbe_tx_ctxt_desc_set_ivlantag(struct sxgbe_tx_ctxt_desc *p, + int is_ivlanvalid, int ivlan_tag, + int ivlan_ctl) +{ + if (is_ivlanvalid) { + p->ivlan_tag_valid = is_ivlanvalid; + p->ivlan_tag = ivlan_tag; + p->ivlan_tag_ctl = ivlan_ctl; + } +} + +/* Return IVLAN Tag */ +static int sxgbe_tx_ctxt_desc_get_ivlantag(struct sxgbe_tx_ctxt_desc *p) +{ + return p->ivlan_tag; +} + +/* Set VLAN Tag */ +static void sxgbe_tx_ctxt_desc_set_vlantag(struct sxgbe_tx_ctxt_desc *p, + int is_vlanvalid, int vlan_tag) +{ + if (is_vlanvalid) { + p->vltag_valid = is_vlanvalid; + p->vlan_tag = vlan_tag; + } +} + +/* Return VLAN Tag */ +static int sxgbe_tx_ctxt_desc_get_vlantag(struct sxgbe_tx_ctxt_desc *p) +{ + return p->vlan_tag; +} + +/* Set Time stamp */ +static void sxgbe_tx_ctxt_desc_set_tstamp(struct sxgbe_tx_ctxt_desc *p, + u8 ostc_enable, u64 tstamp) +{ + if (ostc_enable) { + p->ostc = ostc_enable; + p->tstamp_lo = (u32) tstamp; + p->tstamp_hi = (u32) (tstamp>>32); + } +} +/* Close TX context descriptor */ +static void sxgbe_tx_ctxt_desc_close(struct sxgbe_tx_ctxt_desc *p) +{ + p->own_bit = 1; +} + +/* WB status of context descriptor */ +static int sxgbe_tx_ctxt_desc_get_cde(struct sxgbe_tx_ctxt_desc *p) +{ + return p->ctxt_desc_err; +} + +/* DMA RX descriptor ring initialization */ +static void sxgbe_init_rx_desc(struct sxgbe_rx_norm_desc *p, int disable_rx_ic, + int mode, int end) +{ + p->rdes23.rx_rd_des23.own_bit = 1; + if (disable_rx_ic) + p->rdes23.rx_rd_des23.int_on_com = disable_rx_ic; +} + +/* Get RX own bit */ +static int sxgbe_get_rx_owner(struct sxgbe_rx_norm_desc *p) +{ + return p->rdes23.rx_rd_des23.own_bit; +} + +/* Set RX own bit */ +static void 
sxgbe_set_rx_owner(struct sxgbe_rx_norm_desc *p) +{ + p->rdes23.rx_rd_des23.own_bit = 1; +} + +/* Get the receive frame size */ +static int sxgbe_get_rx_frame_len(struct sxgbe_rx_norm_desc *p) +{ + return p->rdes23.rx_wb_des23.pkt_len; +} + +/* Return first Descriptor status */ +static int sxgbe_get_rx_fd_status(struct sxgbe_rx_norm_desc *p) +{ + return p->rdes23.rx_wb_des23.first_desc; +} + +/* Return Last Descriptor status */ +static int sxgbe_get_rx_ld_status(struct sxgbe_rx_norm_desc *p) +{ + return p->rdes23.rx_wb_des23.last_desc; +} + + +/* Return the RX status looking at the WB fields */ +static int sxgbe_rx_wbstatus(struct sxgbe_rx_norm_desc *p, + struct sxgbe_extra_stats *x, int *checksum) +{ + int status = 0; + + *checksum = CHECKSUM_UNNECESSARY; + if (p->rdes23.rx_wb_des23.err_summary) { + switch (p->rdes23.rx_wb_des23.err_l2_type) { + case RX_GMII_ERR: + status = -EINVAL; + x->rx_code_gmii_err++; + break; + case RX_WATCHDOG_ERR: + status = -EINVAL; + x->rx_watchdog_err++; + break; + case RX_CRC_ERR: + status = -EINVAL; + x->rx_crc_err++; + break; + case RX_GAINT_ERR: + status = -EINVAL; + x->rx_gaint_pkt_err++; + break; + case RX_IP_HDR_ERR: + *checksum = CHECKSUM_NONE; + x->ip_hdr_err++; + break; + case RX_PAYLOAD_ERR: + *checksum = CHECKSUM_NONE; + x->ip_payload_err++; + break; + case RX_OVERFLOW_ERR: + status = -EINVAL; + x->overflow_error++; + break; + default: + pr_err("Invalid Error type\n"); + break; + } + } else { + switch (p->rdes23.rx_wb_des23.err_l2_type) { + case RX_LEN_PKT: + x->len_pkt++; + break; + case RX_MACCTL_PKT: + x->mac_ctl_pkt++; + break; + case RX_DCBCTL_PKT: + x->dcb_ctl_pkt++; + break; + case RX_ARP_PKT: + x->arp_pkt++; + break; + case RX_OAM_PKT: + x->oam_pkt++; + break; + case RX_UNTAG_PKT: + x->untag_okt++; + break; + case RX_OTHER_PKT: + x->other_pkt++; + break; + case RX_SVLAN_PKT: + x->svlan_tag_pkt++; + break; + case RX_CVLAN_PKT: + x->cvlan_tag_pkt++; + break; + case RX_DVLAN_OCVLAN_ICVLAN_PKT: + x->dvlan_ocvlan_icvlan_pkt++; + break; + case RX_DVLAN_OSVLAN_ISVLAN_PKT: + x->dvlan_osvlan_isvlan_pkt++; + break; + case RX_DVLAN_OSVLAN_ICVLAN_PKT: + x->dvlan_osvlan_icvlan_pkt++; + break; + case RX_DVLAN_OCVLAN_ISVLAN_PKT: + x->dvlan_ocvlan_icvlan_pkt++; + break; + default: + pr_err("Invalid L2 Packet type\n"); + break; + } + } + + /* L3/L4 Pkt type */ + switch (p->rdes23.rx_wb_des23.layer34_pkt_type) { + case RX_NOT_IP_PKT: + x->not_ip_pkt++; + break; + case RX_IPV4_TCP_PKT: + x->ip4_tcp_pkt++; + break; + case RX_IPV4_UDP_PKT: + x->ip4_udp_pkt++; + break; + case RX_IPV4_ICMP_PKT: + x->ip4_icmp_pkt++; + break; + case RX_IPV4_UNKNOWN_PKT: + x->ip4_unknown_pkt++; + break; + case RX_IPV6_TCP_PKT: + x->ip6_tcp_pkt++; + break; + case RX_IPV6_UDP_PKT: + x->ip6_udp_pkt++; + break; + case RX_IPV6_ICMP_PKT: + x->ip6_icmp_pkt++; + break; + case RX_IPV6_UNKNOWN_PKT: + x->ip6_unknown_pkt++; + break; + default: + pr_err("Invalid L3/L4 Packet type\n"); + break; + } + + /* Filter */ + if (p->rdes23.rx_wb_des23.vlan_filter_match) + x->vlan_filter_match++; + + if (p->rdes23.rx_wb_des23.sa_filter_fail) { + status = -EINVAL; + x->sa_filter_fail++; + } + if (p->rdes23.rx_wb_des23.da_filter_fail) { + status = -EINVAL; + x->da_filter_fail++; + } + if (p->rdes23.rx_wb_des23.hash_filter_pass) + x->hash_filter_pass++; + + if (p->rdes23.rx_wb_des23.l3_filter_match) + x->l3_filter_match++; + + if (p->rdes23.rx_wb_des23.l4_filter_match) + x->l4_filter_match++; + + return status; +} + +/* Get own bit of context descriptor */ +static int sxgbe_get_rx_ctxt_owner(struct 
sxgbe_rx_ctxt_desc *p) +{ + return p->own_bit; +} + +/* Set own bit for context descriptor */ +static void sxgbe_set_ctxt_rx_owner(struct sxgbe_rx_ctxt_desc *p) +{ + p->own_bit = 1; +} + + +/* Return the reception status looking at Context control information */ +static void sxgbe_rx_ctxt_wbstatus(struct sxgbe_rx_ctxt_desc *p, + struct sxgbe_extra_stats *x) +{ + if (p->tstamp_dropped) + x->timestamp_dropped++; + + /* ptp */ + if (p->ptp_msgtype == RX_NO_PTP) + x->rx_msg_type_no_ptp++; + else if (p->ptp_msgtype == RX_PTP_SYNC) + x->rx_ptp_type_sync++; + else if (p->ptp_msgtype == RX_PTP_FOLLOW_UP) + x->rx_ptp_type_follow_up++; + else if (p->ptp_msgtype == RX_PTP_DELAY_REQ) + x->rx_ptp_type_delay_req++; + else if (p->ptp_msgtype == RX_PTP_DELAY_RESP) + x->rx_ptp_type_delay_resp++; + else if (p->ptp_msgtype == RX_PTP_PDELAY_REQ) + x->rx_ptp_type_pdelay_req++; + else if (p->ptp_msgtype == RX_PTP_PDELAY_RESP) + x->rx_ptp_type_pdelay_resp++; + else if (p->ptp_msgtype == RX_PTP_PDELAY_FOLLOW_UP) + x->rx_ptp_type_pdelay_follow_up++; + else if (p->ptp_msgtype == RX_PTP_ANNOUNCE) + x->rx_ptp_announce++; + else if (p->ptp_msgtype == RX_PTP_MGMT) + x->rx_ptp_mgmt++; + else if (p->ptp_msgtype == RX_PTP_SIGNAL) + x->rx_ptp_signal++; + else if (p->ptp_msgtype == RX_PTP_RESV_MSG) + x->rx_ptp_resv_msg_type++; +} + +/* Get rx timestamp status */ +static int sxgbe_get_rx_ctxt_tstamp_status(struct sxgbe_rx_ctxt_desc *p) +{ + if ((p->tstamp_hi == 0xffffffff) && (p->tstamp_lo == 0xffffffff)) { + pr_err("Time stamp corrupted\n"); + return 0; + } + + return p->tstamp_available; +} + + +static u64 sxgbe_get_rx_timestamp(struct sxgbe_rx_ctxt_desc *p) +{ + u64 ns; + + ns = p->tstamp_lo; + ns |= ((u64)p->tstamp_hi) << 32; + + return ns; +} + +static const struct sxgbe_desc_ops desc_ops = { + .init_tx_desc = sxgbe_init_tx_desc, + .tx_desc_enable_tse = sxgbe_tx_desc_enable_tse, + .prepare_tx_desc = sxgbe_prepare_tx_desc, + .tx_vlanctl_desc = sxgbe_tx_vlanctl_desc, + .set_tx_owner = sxgbe_set_tx_owner, + .get_tx_owner = sxgbe_get_tx_owner, + .close_tx_desc = sxgbe_close_tx_desc, + .release_tx_desc = sxgbe_release_tx_desc, + .clear_tx_ic = sxgbe_clear_tx_ic, + .get_tx_ls = sxgbe_get_tx_ls, + .get_tx_len = sxgbe_get_tx_len, + .tx_enable_tstamp = sxgbe_tx_enable_tstamp, + .get_tx_timestamp_status = sxgbe_get_tx_timestamp_status, + .tx_ctxt_desc_set_ctxt = sxgbe_tx_ctxt_desc_set_ctxt, + .tx_ctxt_desc_set_owner = sxgbe_tx_ctxt_desc_set_owner, + .get_tx_ctxt_owner = sxgbe_tx_ctxt_desc_get_owner, + .tx_ctxt_desc_set_mss = sxgbe_tx_ctxt_desc_set_mss, + .tx_ctxt_desc_get_mss = sxgbe_tx_ctxt_desc_get_mss, + .tx_ctxt_desc_set_tcmssv = sxgbe_tx_ctxt_desc_set_tcmssv, + .tx_ctxt_desc_reset_ostc = sxgbe_tx_ctxt_desc_reset_ostc, + .tx_ctxt_desc_set_ivlantag = sxgbe_tx_ctxt_desc_set_ivlantag, + .tx_ctxt_desc_get_ivlantag = sxgbe_tx_ctxt_desc_get_ivlantag, + .tx_ctxt_desc_set_vlantag = sxgbe_tx_ctxt_desc_set_vlantag, + .tx_ctxt_desc_get_vlantag = sxgbe_tx_ctxt_desc_get_vlantag, + .tx_ctxt_set_tstamp = sxgbe_tx_ctxt_desc_set_tstamp, + .close_tx_ctxt_desc = sxgbe_tx_ctxt_desc_close, + .get_tx_ctxt_cde = sxgbe_tx_ctxt_desc_get_cde, + .init_rx_desc = sxgbe_init_rx_desc, + .get_rx_owner = sxgbe_get_rx_owner, + .set_rx_owner = sxgbe_set_rx_owner, + .get_rx_frame_len = sxgbe_get_rx_frame_len, + .get_rx_fd_status = sxgbe_get_rx_fd_status, + .get_rx_ld_status = sxgbe_get_rx_ld_status, + .rx_wbstatus = sxgbe_rx_wbstatus, + .get_rx_ctxt_owner = sxgbe_get_rx_ctxt_owner, + .set_rx_ctxt_owner = sxgbe_set_ctxt_rx_owner, + .rx_ctxt_wbstatus = 
sxgbe_rx_ctxt_wbstatus, + .get_rx_ctxt_tstamp_status = sxgbe_get_rx_ctxt_tstamp_status, + .get_timestamp = sxgbe_get_rx_timestamp, +}; + +const struct sxgbe_desc_ops *sxgbe_get_desc_ops(void) +{ + return &desc_ops; +} diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.h b/drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.h new file mode 100644 index 000000000000..838cb9fb0ea9 --- /dev/null +++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.h @@ -0,0 +1,298 @@ +/* 10G controller driver for Samsung SoCs + * + * Copyright (C) 2013 Samsung Electronics Co., Ltd. + * http://www.samsung.com + * + * Author: Siva Reddy Kallam <siva.kallam@samsung.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef __SXGBE_DESC_H__ +#define __SXGBE_DESC_H__ + +#define SXGBE_DESC_SIZE_BYTES 16 + +/* forward declaration */ +struct sxgbe_extra_stats; + +/* Transmit checksum insertion control */ +enum tdes_csum_insertion { + cic_disabled = 0, /* Checksum Insertion Control */ + cic_only_ip = 1, /* Only IP header */ + /* IP header but pseudoheader is not calculated */ + cic_no_pseudoheader = 2, + cic_full = 3, /* IP header and pseudoheader */ +}; + +struct sxgbe_tx_norm_desc { + u64 tdes01; /* buf1 address */ + union { + /* TX Read-Format Desc 2,3 */ + struct { + /* TDES2 */ + u32 buf1_size:14; + u32 vlan_tag_ctl:2; + u32 buf2_size:14; + u32 timestmp_enable:1; + u32 int_on_com:1; + /* TDES3 */ + union { + u32 tcp_payload_len:18; + struct { + u32 total_pkt_len:15; + u32 reserved1:1; + u32 cksum_ctl:2; + } cksum_pktlen; + } tx_pkt_len; + + u32 tse_bit:1; + u32 tcp_hdr_len:4; + u32 sa_insert_ctl:3; + u32 crc_pad_ctl:2; + u32 last_desc:1; + u32 first_desc:1; + u32 ctxt_bit:1; + u32 own_bit:1; + } tx_rd_des23; + + /* tx write back Desc 2,3 */ + struct { + /* WB TES2 */ + u32 reserved1; + /* WB TES3 */ + u32 reserved2:31; + u32 own_bit:1; + } tx_wb_des23; + } tdes23; +}; + +struct sxgbe_rx_norm_desc { + union { + u32 rdes0; /* buf1 address */ + struct { + u32 out_vlan_tag:16; + u32 in_vlan_tag:16; + } wb_rx_des0; + } rd_wb_des0; + + union { + u32 rdes1; /* buf2 address or buf1[63:32] */ + u32 rss_hash; /* Write-back RX */ + } rd_wb_des1; + + union { + /* RX Read format Desc 2,3 */ + struct{ + /* RDES2 */ + u32 buf2_addr; + /* RDES3 */ + u32 buf2_hi_addr:30; + u32 int_on_com:1; + u32 own_bit:1; + } rx_rd_des23; + + /* RX write back */ + struct{ + /* WB RDES2 */ + u32 hdr_len:10; + u32 rdes2_reserved:2; + u32 elrd_val:1; + u32 iovt_sel:1; + u32 res_pkt:1; + u32 vlan_filter_match:1; + u32 sa_filter_fail:1; + u32 da_filter_fail:1; + u32 hash_filter_pass:1; + u32 macaddr_filter_match:8; + u32 l3_filter_match:1; + u32 l4_filter_match:1; + u32 l34_filter_num:3; + + /* WB RDES3 */ + u32 pkt_len:14; + u32 rdes3_reserved:1; + u32 err_summary:1; + u32 err_l2_type:4; + u32 layer34_pkt_type:4; + u32 no_coagulation_pkt:1; + u32 in_seq_pkt:1; + u32 rss_valid:1; + u32 context_des_avail:1; + u32 last_desc:1; + u32 first_desc:1; + u32 recv_context_desc:1; + u32 own_bit:1; + } rx_wb_des23; + } rdes23; +}; + +/* Context descriptor structure */ +struct sxgbe_tx_ctxt_desc { + u32 tstamp_lo; + u32 tstamp_hi; + u32 maxseg_size:15; + u32 reserved1:1; + u32 ivlan_tag:16; + u32 vlan_tag:16; + u32 vltag_valid:1; + u32 ivlan_tag_valid:1; + u32 ivlan_tag_ctl:2; + u32 reserved2:3; + u32 ctxt_desc_err:1; + u32 reserved3:2; + u32 ostc:1; + u32 tcmssv:1; + u32 reserved4:2; + u32 ctxt_bit:1; + u32 
own_bit:1;
+};
+
+struct sxgbe_rx_ctxt_desc {
+	u32 tstamp_lo;
+	u32 tstamp_hi;
+	u32 reserved1;
+	u32 ptp_msgtype:4;
+	u32 tstamp_available:1;
+	u32 ptp_rsp_err:1;
+	u32 tstamp_dropped:1;
+	u32 reserved2:23;
+	u32 rx_ctxt_desc:1;
+	u32 own_bit:1;
+};
+
+struct sxgbe_desc_ops {
+	/* DMA TX descriptor ring initialization */
+	void (*init_tx_desc)(struct sxgbe_tx_norm_desc *p);
+
+	/* Invoked by the xmit function to prepare the tx descriptor */
+	void (*tx_desc_enable_tse)(struct sxgbe_tx_norm_desc *p, u8 is_tse,
+				   u32 total_hdr_len, u32 tcp_hdr_len,
+				   u32 tcp_payload_len);
+
+	/* Assign buffer lengths for descriptor */
+	void (*prepare_tx_desc)(struct sxgbe_tx_norm_desc *p, u8 is_fd,
+				int buf1_len, int pkt_len, int cksum);
+
+	/* Set VLAN control information */
+	void (*tx_vlanctl_desc)(struct sxgbe_tx_norm_desc *p, int vlan_ctl);
+
+	/* Set the owner of the descriptor */
+	void (*set_tx_owner)(struct sxgbe_tx_norm_desc *p);
+
+	/* Get the owner of the descriptor */
+	int (*get_tx_owner)(struct sxgbe_tx_norm_desc *p);
+
+	/* Invoked by the xmit function to close the tx descriptor */
+	void (*close_tx_desc)(struct sxgbe_tx_norm_desc *p);
+
+	/* Clean the tx descriptor as soon as the tx irq is received */
+	void (*release_tx_desc)(struct sxgbe_tx_norm_desc *p);
+
+	/* Clear interrupt on tx frame completion. When this bit is
+	 * set, an interrupt is raised as soon as the frame is transmitted
+	 */
+	void (*clear_tx_ic)(struct sxgbe_tx_norm_desc *p);
+
+	/* Last tx segment reports the transmit status */
+	int (*get_tx_ls)(struct sxgbe_tx_norm_desc *p);
+
+	/* Get the buffer size from the descriptor */
+	int (*get_tx_len)(struct sxgbe_tx_norm_desc *p);
+
+	/* Set tx timestamp enable bit */
+	void (*tx_enable_tstamp)(struct sxgbe_tx_norm_desc *p);
+
+	/* get tx timestamp status */
+	int (*get_tx_timestamp_status)(struct sxgbe_tx_norm_desc *p);
+
+	/* TX Context Descriptor Specific */
+	void (*tx_ctxt_desc_set_ctxt)(struct sxgbe_tx_ctxt_desc *p);
+
+	/* Set the owner of the TX context descriptor */
+	void (*tx_ctxt_desc_set_owner)(struct sxgbe_tx_ctxt_desc *p);
+
+	/* Get the owner of the TX context descriptor */
+	int (*get_tx_ctxt_owner)(struct sxgbe_tx_ctxt_desc *p);
+
+	/* Set TX mss */
+	void (*tx_ctxt_desc_set_mss)(struct sxgbe_tx_ctxt_desc *p, u16 mss);
+
+	/* Get TX mss */
+	int (*tx_ctxt_desc_get_mss)(struct sxgbe_tx_ctxt_desc *p);
+
+	/* Set TX tcmssv */
+	void (*tx_ctxt_desc_set_tcmssv)(struct sxgbe_tx_ctxt_desc *p);
+
+	/* Reset TX ostc */
+	void (*tx_ctxt_desc_reset_ostc)(struct sxgbe_tx_ctxt_desc *p);
+
+	/* Set IVLAN information */
+	void (*tx_ctxt_desc_set_ivlantag)(struct sxgbe_tx_ctxt_desc *p,
+					  int is_ivlanvalid, int ivlan_tag,
+					  int ivlan_ctl);
+
+	/* Return IVLAN Tag */
+	int (*tx_ctxt_desc_get_ivlantag)(struct sxgbe_tx_ctxt_desc *p);
+
+	/* Set VLAN Tag */
+	void (*tx_ctxt_desc_set_vlantag)(struct sxgbe_tx_ctxt_desc *p,
+					 int is_vlanvalid, int vlan_tag);
+
+	/* Return VLAN Tag */
+	int (*tx_ctxt_desc_get_vlantag)(struct sxgbe_tx_ctxt_desc *p);
+
+	/* Set Time stamp */
+	void (*tx_ctxt_set_tstamp)(struct sxgbe_tx_ctxt_desc *p,
+				   u8 ostc_enable, u64 tstamp);
+
+	/* Close TX context descriptor */
+	void (*close_tx_ctxt_desc)(struct sxgbe_tx_ctxt_desc *p);
+
+	/* WB status of context descriptor */
+	int (*get_tx_ctxt_cde)(struct sxgbe_tx_ctxt_desc *p);
+
+	/* DMA RX descriptor ring initialization */
+	void (*init_rx_desc)(struct sxgbe_rx_norm_desc *p, int disable_rx_ic,
+			     int mode, int end);
+
+	/* Get own bit */
+	int (*get_rx_owner)(struct sxgbe_rx_norm_desc *p);
+
+	/* Set own bit */
+	void (*set_rx_owner)(struct sxgbe_rx_norm_desc *p);
+
+	/* Get the receive frame size */
+	int (*get_rx_frame_len)(struct sxgbe_rx_norm_desc *p);
+
+	/* Return first Descriptor status */
+	int (*get_rx_fd_status)(struct sxgbe_rx_norm_desc *p);
+
+	/* Return last Descriptor status */
+	int (*get_rx_ld_status)(struct sxgbe_rx_norm_desc *p);
+
+	/* Return the reception status looking at the write-back fields */
+	int (*rx_wbstatus)(struct sxgbe_rx_norm_desc *p,
+			   struct sxgbe_extra_stats *x, int *checksum);
+
+	/* Get own bit */
+	int (*get_rx_ctxt_owner)(struct sxgbe_rx_ctxt_desc *p);
+
+	/* Set own bit */
+	void (*set_rx_ctxt_owner)(struct sxgbe_rx_ctxt_desc *p);
+
+	/* Return the reception status looking at Context control information */
+	void (*rx_ctxt_wbstatus)(struct sxgbe_rx_ctxt_desc *p,
+				 struct sxgbe_extra_stats *x);
+
+	/* Get rx timestamp status */
+	int (*get_rx_ctxt_tstamp_status)(struct sxgbe_rx_ctxt_desc *p);
+
+	/* Get timestamp value for rx, need to check this */
+	u64 (*get_timestamp)(struct sxgbe_rx_ctxt_desc *p);
+};
+
+const struct sxgbe_desc_ops *sxgbe_get_desc_ops(void);
+
+#endif /* __SXGBE_DESC_H__ */
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_dma.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_dma.c
new file mode 100644
index 000000000000..28f89c41d0cd
--- /dev/null
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_dma.c
@@ -0,0 +1,382 @@
+/* 10G controller driver for Samsung SoCs
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ *		http://www.samsung.com
+ *
+ * Author: Siva Reddy Kallam <siva.kallam@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/export.h>
+#include <linux/netdevice.h>
+#include <linux/phy.h>
+
+#include "sxgbe_common.h"
+#include "sxgbe_dma.h"
+#include "sxgbe_reg.h"
+#include "sxgbe_desc.h"
+
+/* DMA core initialization */
+static int sxgbe_dma_init(void __iomem *ioaddr, int fix_burst, int burst_map)
+{
+	int retry_count = 10;
+	u32 reg_val;
+
+	/* reset the DMA */
+	writel(SXGBE_DMA_SOFT_RESET, ioaddr + SXGBE_DMA_MODE_REG);
+	while (retry_count--) {
+		if (!(readl(ioaddr + SXGBE_DMA_MODE_REG) &
+		      SXGBE_DMA_SOFT_RESET))
+			break;
+		mdelay(10);
+	}
+
+	if (retry_count < 0)
+		return -EBUSY;
+
+	reg_val = readl(ioaddr + SXGBE_DMA_SYSBUS_MODE_REG);
+
+	/* if fix_burst = 0, Set UNDEF = 1 of DMA_Sys_Mode Register.
+	 * if fix_burst = 1, Set UNDEF = 0 of DMA_Sys_Mode Register.
+	 * burst_map is bitmap for BLEN[4, 8, 16, 32, 64, 128 and 256].
+	 * Set burst_map irrespective of fix_burst value.
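+	 * (For example, a burst_map of 0x7 advertises BLEN4, BLEN8 and
+	 * BLEN16; SXGBE_DMA_BLENMAP_LSHIFT shifts the bitmap into the
+	 * BLEN field's position in the register.)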
+	 */
+	if (!fix_burst)
+		reg_val |= SXGBE_DMA_AXI_UNDEF_BURST;
+
+	/* write burst len map */
+	reg_val |= (burst_map << SXGBE_DMA_BLENMAP_LSHIFT);
+
+	writel(reg_val, ioaddr + SXGBE_DMA_SYSBUS_MODE_REG);
+
+	return 0;
+}
+
+static void sxgbe_dma_channel_init(void __iomem *ioaddr, int cha_num,
+				   int fix_burst, int pbl, dma_addr_t dma_tx,
+				   dma_addr_t dma_rx, int t_rsize, int r_rsize)
+{
+	u32 reg_val;
+	dma_addr_t dma_addr;
+
+	reg_val = readl(ioaddr + SXGBE_DMA_CHA_CTL_REG(cha_num));
+	/* set the pbl */
+	if (fix_burst) {
+		reg_val |= SXGBE_DMA_PBL_X8MODE;
+		writel(reg_val, ioaddr + SXGBE_DMA_CHA_CTL_REG(cha_num));
+		/* program the TX pbl */
+		reg_val = readl(ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cha_num));
+		reg_val |= (pbl << SXGBE_DMA_TXPBL_LSHIFT);
+		writel(reg_val, ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cha_num));
+		/* program the RX pbl */
+		reg_val = readl(ioaddr + SXGBE_DMA_CHA_RXCTL_REG(cha_num));
+		reg_val |= (pbl << SXGBE_DMA_RXPBL_LSHIFT);
+		writel(reg_val, ioaddr + SXGBE_DMA_CHA_RXCTL_REG(cha_num));
+	}
+
+	/* program desc registers */
+	writel(upper_32_bits(dma_tx),
+	       ioaddr + SXGBE_DMA_CHA_TXDESC_HADD_REG(cha_num));
+	writel(lower_32_bits(dma_tx),
+	       ioaddr + SXGBE_DMA_CHA_TXDESC_LADD_REG(cha_num));
+
+	writel(upper_32_bits(dma_rx),
+	       ioaddr + SXGBE_DMA_CHA_RXDESC_HADD_REG(cha_num));
+	writel(lower_32_bits(dma_rx),
+	       ioaddr + SXGBE_DMA_CHA_RXDESC_LADD_REG(cha_num));
+
+	/* program tail pointers; assumption: upper 32 bits are constant
+	 * and the same as the TX/RX desc list base
+	 */
+	dma_addr = dma_tx + ((t_rsize - 1) * SXGBE_DESC_SIZE_BYTES);
+	writel(lower_32_bits(dma_addr),
+	       ioaddr + SXGBE_DMA_CHA_TXDESC_TAILPTR_REG(cha_num));
+
+	dma_addr = dma_rx + ((r_rsize - 1) * SXGBE_DESC_SIZE_BYTES);
+	writel(lower_32_bits(dma_addr),
+	       ioaddr + SXGBE_DMA_CHA_RXDESC_TAILPTR_REG(cha_num));
+	/* program the ring sizes */
+	writel(t_rsize - 1, ioaddr + SXGBE_DMA_CHA_TXDESC_RINGLEN_REG(cha_num));
+	writel(r_rsize - 1, ioaddr + SXGBE_DMA_CHA_RXDESC_RINGLEN_REG(cha_num));
+
+	/* Enable TX/RX interrupts */
+	writel(SXGBE_DMA_ENA_INT,
+	       ioaddr + SXGBE_DMA_CHA_INT_ENABLE_REG(cha_num));
+}
+
+static void sxgbe_enable_dma_transmission(void __iomem *ioaddr, int cha_num)
+{
+	u32 tx_config;
+
+	tx_config = readl(ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cha_num));
+	tx_config |= SXGBE_TX_START_DMA;
+	writel(tx_config, ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cha_num));
+}
+
+static void sxgbe_enable_dma_irq(void __iomem *ioaddr, int dma_cnum)
+{
+	/* Enable TX/RX interrupts */
+	writel(SXGBE_DMA_ENA_INT,
+	       ioaddr + SXGBE_DMA_CHA_INT_ENABLE_REG(dma_cnum));
+}
+
+static void sxgbe_disable_dma_irq(void __iomem *ioaddr, int dma_cnum)
+{
+	/* Disable TX/RX interrupts */
+	writel(0, ioaddr + SXGBE_DMA_CHA_INT_ENABLE_REG(dma_cnum));
+}
+
+static void sxgbe_dma_start_tx(void __iomem *ioaddr, int tchannels)
+{
+	int cnum;
+	u32 tx_ctl_reg;
+
+	for (cnum = 0; cnum < tchannels; cnum++) {
+		tx_ctl_reg = readl(ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cnum));
+		tx_ctl_reg |= SXGBE_TX_ENABLE;
+		writel(tx_ctl_reg,
+		       ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cnum));
+	}
+}
+
+static void sxgbe_dma_start_tx_queue(void __iomem *ioaddr, int dma_cnum)
+{
+	u32 tx_ctl_reg;
+
+	tx_ctl_reg = readl(ioaddr + SXGBE_DMA_CHA_TXCTL_REG(dma_cnum));
+	tx_ctl_reg |= SXGBE_TX_ENABLE;
+	writel(tx_ctl_reg, ioaddr + SXGBE_DMA_CHA_TXCTL_REG(dma_cnum));
+}
+
+static void sxgbe_dma_stop_tx_queue(void __iomem *ioaddr, int dma_cnum)
+{
+	u32 tx_ctl_reg;
+
+	tx_ctl_reg = readl(ioaddr + SXGBE_DMA_CHA_TXCTL_REG(dma_cnum));
+	tx_ctl_reg &= ~(SXGBE_TX_ENABLE);
+	writel(tx_ctl_reg, ioaddr +
SXGBE_DMA_CHA_TXCTL_REG(dma_cnum)); +} + +static void sxgbe_dma_stop_tx(void __iomem *ioaddr, int tchannels) +{ + int cnum; + u32 tx_ctl_reg; + + for (cnum = 0; cnum < tchannels; cnum++) { + tx_ctl_reg = readl(ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cnum)); + tx_ctl_reg &= ~(SXGBE_TX_ENABLE); + writel(tx_ctl_reg, ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cnum)); + } +} + +static void sxgbe_dma_start_rx(void __iomem *ioaddr, int rchannels) +{ + int cnum; + u32 rx_ctl_reg; + + for (cnum = 0; cnum < rchannels; cnum++) { + rx_ctl_reg = readl(ioaddr + SXGBE_DMA_CHA_RXCTL_REG(cnum)); + rx_ctl_reg |= SXGBE_RX_ENABLE; + writel(rx_ctl_reg, + ioaddr + SXGBE_DMA_CHA_RXCTL_REG(cnum)); + } +} + +static void sxgbe_dma_stop_rx(void __iomem *ioaddr, int rchannels) +{ + int cnum; + u32 rx_ctl_reg; + + for (cnum = 0; cnum < rchannels; cnum++) { + rx_ctl_reg = readl(ioaddr + SXGBE_DMA_CHA_RXCTL_REG(cnum)); + rx_ctl_reg &= ~(SXGBE_RX_ENABLE); + writel(rx_ctl_reg, ioaddr + SXGBE_DMA_CHA_RXCTL_REG(cnum)); + } +} + +static int sxgbe_tx_dma_int_status(void __iomem *ioaddr, int channel_no, + struct sxgbe_extra_stats *x) +{ + u32 int_status = readl(ioaddr + SXGBE_DMA_CHA_STATUS_REG(channel_no)); + u32 clear_val = 0; + u32 ret_val = 0; + + /* TX Normal Interrupt Summary */ + if (likely(int_status & SXGBE_DMA_INT_STATUS_NIS)) { + x->normal_irq_n++; + if (int_status & SXGBE_DMA_INT_STATUS_TI) { + ret_val |= handle_tx; + x->tx_normal_irq_n++; + clear_val |= SXGBE_DMA_INT_STATUS_TI; + } + + if (int_status & SXGBE_DMA_INT_STATUS_TBU) { + x->tx_underflow_irq++; + ret_val |= tx_bump_tc; + clear_val |= SXGBE_DMA_INT_STATUS_TBU; + } + } else if (unlikely(int_status & SXGBE_DMA_INT_STATUS_AIS)) { + /* TX Abnormal Interrupt Summary */ + if (int_status & SXGBE_DMA_INT_STATUS_TPS) { + ret_val |= tx_hard_error; + clear_val |= SXGBE_DMA_INT_STATUS_TPS; + x->tx_process_stopped_irq++; + } + + if (int_status & SXGBE_DMA_INT_STATUS_FBE) { + ret_val |= tx_hard_error; + x->fatal_bus_error_irq++; + + /* Assumption: FBE bit is the combination of + * all the bus access erros and cleared when + * the respective error bits cleared + */ + + /* check for actual cause */ + if (int_status & SXGBE_DMA_INT_STATUS_TEB0) { + x->tx_read_transfer_err++; + clear_val |= SXGBE_DMA_INT_STATUS_TEB0; + } else { + x->tx_write_transfer_err++; + } + + if (int_status & SXGBE_DMA_INT_STATUS_TEB1) { + x->tx_desc_access_err++; + clear_val |= SXGBE_DMA_INT_STATUS_TEB1; + } else { + x->tx_buffer_access_err++; + } + + if (int_status & SXGBE_DMA_INT_STATUS_TEB2) { + x->tx_data_transfer_err++; + clear_val |= SXGBE_DMA_INT_STATUS_TEB2; + } + } + + /* context descriptor error */ + if (int_status & SXGBE_DMA_INT_STATUS_CTXTERR) { + x->tx_ctxt_desc_err++; + clear_val |= SXGBE_DMA_INT_STATUS_CTXTERR; + } + } + + /* clear the served bits */ + writel(clear_val, ioaddr + SXGBE_DMA_CHA_STATUS_REG(channel_no)); + + return ret_val; +} + +static int sxgbe_rx_dma_int_status(void __iomem *ioaddr, int channel_no, + struct sxgbe_extra_stats *x) +{ + u32 int_status = readl(ioaddr + SXGBE_DMA_CHA_STATUS_REG(channel_no)); + u32 clear_val = 0; + u32 ret_val = 0; + + /* RX Normal Interrupt Summary */ + if (likely(int_status & SXGBE_DMA_INT_STATUS_NIS)) { + x->normal_irq_n++; + if (int_status & SXGBE_DMA_INT_STATUS_RI) { + ret_val |= handle_rx; + x->rx_normal_irq_n++; + clear_val |= SXGBE_DMA_INT_STATUS_RI; + } + } else if (unlikely(int_status & SXGBE_DMA_INT_STATUS_AIS)) { + /* RX Abnormal Interrupt Summary */ + if (int_status & SXGBE_DMA_INT_STATUS_RBU) { + ret_val |= rx_bump_tc; + clear_val |= 
SXGBE_DMA_INT_STATUS_RBU; + x->rx_underflow_irq++; + } + + if (int_status & SXGBE_DMA_INT_STATUS_RPS) { + ret_val |= rx_hard_error; + clear_val |= SXGBE_DMA_INT_STATUS_RPS; + x->rx_process_stopped_irq++; + } + + if (int_status & SXGBE_DMA_INT_STATUS_FBE) { + ret_val |= rx_hard_error; + x->fatal_bus_error_irq++; + + /* Assumption: FBE bit is the combination of + * all the bus access erros and cleared when + * the respective error bits cleared + */ + + /* check for actual cause */ + if (int_status & SXGBE_DMA_INT_STATUS_REB0) { + x->rx_read_transfer_err++; + clear_val |= SXGBE_DMA_INT_STATUS_REB0; + } else { + x->rx_write_transfer_err++; + } + + if (int_status & SXGBE_DMA_INT_STATUS_REB1) { + x->rx_desc_access_err++; + clear_val |= SXGBE_DMA_INT_STATUS_REB1; + } else { + x->rx_buffer_access_err++; + } + + if (int_status & SXGBE_DMA_INT_STATUS_REB2) { + x->rx_data_transfer_err++; + clear_val |= SXGBE_DMA_INT_STATUS_REB2; + } + } + } + + /* clear the served bits */ + writel(clear_val, ioaddr + SXGBE_DMA_CHA_STATUS_REG(channel_no)); + + return ret_val; +} + +/* Program the HW RX Watchdog */ +static void sxgbe_dma_rx_watchdog(void __iomem *ioaddr, u32 riwt) +{ + u32 que_num; + + SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, que_num) { + writel(riwt, + ioaddr + SXGBE_DMA_CHA_INT_RXWATCHTMR_REG(que_num)); + } +} + +static void sxgbe_enable_tso(void __iomem *ioaddr, u8 chan_num) +{ + u32 ctrl; + + ctrl = readl(ioaddr + SXGBE_DMA_CHA_TXCTL_REG(chan_num)); + ctrl |= SXGBE_DMA_CHA_TXCTL_TSE_ENABLE; + writel(ctrl, ioaddr + SXGBE_DMA_CHA_TXCTL_REG(chan_num)); +} + +static const struct sxgbe_dma_ops sxgbe_dma_ops = { + .init = sxgbe_dma_init, + .cha_init = sxgbe_dma_channel_init, + .enable_dma_transmission = sxgbe_enable_dma_transmission, + .enable_dma_irq = sxgbe_enable_dma_irq, + .disable_dma_irq = sxgbe_disable_dma_irq, + .start_tx = sxgbe_dma_start_tx, + .start_tx_queue = sxgbe_dma_start_tx_queue, + .stop_tx = sxgbe_dma_stop_tx, + .stop_tx_queue = sxgbe_dma_stop_tx_queue, + .start_rx = sxgbe_dma_start_rx, + .stop_rx = sxgbe_dma_stop_rx, + .tx_dma_int_status = sxgbe_tx_dma_int_status, + .rx_dma_int_status = sxgbe_rx_dma_int_status, + .rx_watchdog = sxgbe_dma_rx_watchdog, + .enable_tso = sxgbe_enable_tso, +}; + +const struct sxgbe_dma_ops *sxgbe_get_dma_ops(void) +{ + return &sxgbe_dma_ops; +} diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_dma.h b/drivers/net/ethernet/samsung/sxgbe/sxgbe_dma.h new file mode 100644 index 000000000000..1607b54c9bb0 --- /dev/null +++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_dma.h @@ -0,0 +1,50 @@ +/* 10G controller driver for Samsung SoCs + * + * Copyright (C) 2013 Samsung Electronics Co., Ltd. + * http://www.samsung.com + * + * Author: Siva Reddy Kallam <siva.kallam@samsung.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */
+#ifndef __SXGBE_DMA_H__
+#define __SXGBE_DMA_H__
+
+/* forward declaration */
+struct sxgbe_extra_stats;
+
+#define SXGBE_DMA_BLENMAP_LSHIFT	1
+#define SXGBE_DMA_TXPBL_LSHIFT		16
+#define SXGBE_DMA_RXPBL_LSHIFT		16
+#define DEFAULT_DMA_PBL			8
+
+struct sxgbe_dma_ops {
+	/* DMA core initialization */
+	int (*init)(void __iomem *ioaddr, int fix_burst, int burst_map);
+	void (*cha_init)(void __iomem *ioaddr, int cha_num, int fix_burst,
+			 int pbl, dma_addr_t dma_tx, dma_addr_t dma_rx,
+			 int t_rsize, int r_rsize);
+	void (*enable_dma_transmission)(void __iomem *ioaddr, int dma_cnum);
+	void (*enable_dma_irq)(void __iomem *ioaddr, int dma_cnum);
+	void (*disable_dma_irq)(void __iomem *ioaddr, int dma_cnum);
+	void (*start_tx)(void __iomem *ioaddr, int tchannels);
+	void (*start_tx_queue)(void __iomem *ioaddr, int dma_cnum);
+	void (*stop_tx)(void __iomem *ioaddr, int tchannels);
+	void (*stop_tx_queue)(void __iomem *ioaddr, int dma_cnum);
+	void (*start_rx)(void __iomem *ioaddr, int rchannels);
+	void (*stop_rx)(void __iomem *ioaddr, int rchannels);
+	int (*tx_dma_int_status)(void __iomem *ioaddr, int channel_no,
+				 struct sxgbe_extra_stats *x);
+	int (*rx_dma_int_status)(void __iomem *ioaddr, int channel_no,
+				 struct sxgbe_extra_stats *x);
+	/* Program the HW RX Watchdog */
+	void (*rx_watchdog)(void __iomem *ioaddr, u32 riwt);
+	/* Enable TSO for each DMA channel */
+	void (*enable_tso)(void __iomem *ioaddr, u8 chan_num);
+};
+
+const struct sxgbe_dma_ops *sxgbe_get_dma_ops(void);
+
+#endif /* __SXGBE_DMA_H__ */
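
Before the ethtool support, a short usage sketch for the ops table above. Hypothetical code, not part of the patch: the function name and ring arguments are invented, and only operations declared in this header are called.

	static void example_dma_bringup(struct sxgbe_priv_data *priv,
					dma_addr_t tx_phy, dma_addr_t rx_phy)
	{
		const struct sxgbe_dma_ops *dma = sxgbe_get_dma_ops();

		/* soft-reset the engine; advertise all seven burst lengths */
		if (dma->init(priv->ioaddr, 0, 0x7f))
			return;

		/* point channel 0 at its rings and enable its interrupts */
		dma->cha_init(priv->ioaddr, 0, 0, DEFAULT_DMA_PBL, tx_phy,
			      rx_phy, priv->dma_tx_size, priv->dma_rx_size);

		dma->start_tx_queue(priv->ioaddr, 0);
		dma->rx_watchdog(priv->ioaddr, priv->rx_riwt);
	}

diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c
new file mode 100644
index 000000000000..0415fa50eeb7
--- /dev/null
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c
@@ -0,0 +1,524 @@
+/* 10G controller driver for Samsung SoCs
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ *		http://www.samsung.com
+ *
+ * Author: Siva Reddy Kallam <siva.kallam@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.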
+ */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/clk.h> +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/netdevice.h> +#include <linux/net_tstamp.h> +#include <linux/phy.h> +#include <linux/ptp_clock_kernel.h> + +#include "sxgbe_common.h" +#include "sxgbe_reg.h" +#include "sxgbe_dma.h" + +struct sxgbe_stats { + char stat_string[ETH_GSTRING_LEN]; + int sizeof_stat; + int stat_offset; +}; + +#define SXGBE_STAT(m) \ +{ \ + #m, \ + FIELD_SIZEOF(struct sxgbe_extra_stats, m), \ + offsetof(struct sxgbe_priv_data, xstats.m) \ +} + +static const struct sxgbe_stats sxgbe_gstrings_stats[] = { + /* TX/RX IRQ events */ + SXGBE_STAT(tx_process_stopped_irq), + SXGBE_STAT(tx_ctxt_desc_err), + SXGBE_STAT(tx_threshold), + SXGBE_STAT(rx_threshold), + SXGBE_STAT(tx_pkt_n), + SXGBE_STAT(rx_pkt_n), + SXGBE_STAT(normal_irq_n), + SXGBE_STAT(tx_normal_irq_n), + SXGBE_STAT(rx_normal_irq_n), + SXGBE_STAT(napi_poll), + SXGBE_STAT(tx_clean), + SXGBE_STAT(tx_reset_ic_bit), + SXGBE_STAT(rx_process_stopped_irq), + SXGBE_STAT(rx_underflow_irq), + + /* Bus access errors */ + SXGBE_STAT(fatal_bus_error_irq), + SXGBE_STAT(tx_read_transfer_err), + SXGBE_STAT(tx_write_transfer_err), + SXGBE_STAT(tx_desc_access_err), + SXGBE_STAT(tx_buffer_access_err), + SXGBE_STAT(tx_data_transfer_err), + SXGBE_STAT(rx_read_transfer_err), + SXGBE_STAT(rx_write_transfer_err), + SXGBE_STAT(rx_desc_access_err), + SXGBE_STAT(rx_buffer_access_err), + SXGBE_STAT(rx_data_transfer_err), + + /* EEE-LPI stats */ + SXGBE_STAT(tx_lpi_entry_n), + SXGBE_STAT(tx_lpi_exit_n), + SXGBE_STAT(rx_lpi_entry_n), + SXGBE_STAT(rx_lpi_exit_n), + SXGBE_STAT(eee_wakeup_error_n), + + /* RX specific */ + /* L2 error */ + SXGBE_STAT(rx_code_gmii_err), + SXGBE_STAT(rx_watchdog_err), + SXGBE_STAT(rx_crc_err), + SXGBE_STAT(rx_gaint_pkt_err), + SXGBE_STAT(ip_hdr_err), + SXGBE_STAT(ip_payload_err), + SXGBE_STAT(overflow_error), + + /* L2 Pkt type */ + SXGBE_STAT(len_pkt), + SXGBE_STAT(mac_ctl_pkt), + SXGBE_STAT(dcb_ctl_pkt), + SXGBE_STAT(arp_pkt), + SXGBE_STAT(oam_pkt), + SXGBE_STAT(untag_okt), + SXGBE_STAT(other_pkt), + SXGBE_STAT(svlan_tag_pkt), + SXGBE_STAT(cvlan_tag_pkt), + SXGBE_STAT(dvlan_ocvlan_icvlan_pkt), + SXGBE_STAT(dvlan_osvlan_isvlan_pkt), + SXGBE_STAT(dvlan_osvlan_icvlan_pkt), + SXGBE_STAT(dvan_ocvlan_icvlan_pkt), + + /* L3/L4 Pkt type */ + SXGBE_STAT(not_ip_pkt), + SXGBE_STAT(ip4_tcp_pkt), + SXGBE_STAT(ip4_udp_pkt), + SXGBE_STAT(ip4_icmp_pkt), + SXGBE_STAT(ip4_unknown_pkt), + SXGBE_STAT(ip6_tcp_pkt), + SXGBE_STAT(ip6_udp_pkt), + SXGBE_STAT(ip6_icmp_pkt), + SXGBE_STAT(ip6_unknown_pkt), + + /* Filter specific */ + SXGBE_STAT(vlan_filter_match), + SXGBE_STAT(sa_filter_fail), + SXGBE_STAT(da_filter_fail), + SXGBE_STAT(hash_filter_pass), + SXGBE_STAT(l3_filter_match), + SXGBE_STAT(l4_filter_match), + + /* RX context specific */ + SXGBE_STAT(timestamp_dropped), + SXGBE_STAT(rx_msg_type_no_ptp), + SXGBE_STAT(rx_ptp_type_sync), + SXGBE_STAT(rx_ptp_type_follow_up), + SXGBE_STAT(rx_ptp_type_delay_req), + SXGBE_STAT(rx_ptp_type_delay_resp), + SXGBE_STAT(rx_ptp_type_pdelay_req), + SXGBE_STAT(rx_ptp_type_pdelay_resp), + SXGBE_STAT(rx_ptp_type_pdelay_follow_up), + SXGBE_STAT(rx_ptp_announce), + SXGBE_STAT(rx_ptp_mgmt), + SXGBE_STAT(rx_ptp_signal), + SXGBE_STAT(rx_ptp_resv_msg_type), +}; +#define SXGBE_STATS_LEN ARRAY_SIZE(sxgbe_gstrings_stats) + +static int sxgbe_get_eee(struct net_device *dev, + struct ethtool_eee *edata) +{ + struct sxgbe_priv_data *priv = netdev_priv(dev); + + if (!priv->hw_cap.eee) + return -EOPNOTSUPP; + + 
edata->eee_enabled = priv->eee_enabled; + edata->eee_active = priv->eee_active; + edata->tx_lpi_timer = priv->tx_lpi_timer; + + return phy_ethtool_get_eee(priv->phydev, edata); +} + +static int sxgbe_set_eee(struct net_device *dev, + struct ethtool_eee *edata) +{ + struct sxgbe_priv_data *priv = netdev_priv(dev); + + priv->eee_enabled = edata->eee_enabled; + + if (!priv->eee_enabled) { + sxgbe_disable_eee_mode(priv); + } else { + /* We are asking for enabling the EEE but it is safe + * to verify all by invoking the eee_init function. + * In case of failure it will return an error. + */ + priv->eee_enabled = sxgbe_eee_init(priv); + if (!priv->eee_enabled) + return -EOPNOTSUPP; + + /* Do not change tx_lpi_timer in case of failure */ + priv->tx_lpi_timer = edata->tx_lpi_timer; + } + + return phy_ethtool_set_eee(priv->phydev, edata); +} + +static void sxgbe_getdrvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); +} + +static int sxgbe_getsettings(struct net_device *dev, + struct ethtool_cmd *cmd) +{ + struct sxgbe_priv_data *priv = netdev_priv(dev); + + if (priv->phydev) + return phy_ethtool_gset(priv->phydev, cmd); + + return -EOPNOTSUPP; +} + +static int sxgbe_setsettings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct sxgbe_priv_data *priv = netdev_priv(dev); + + if (priv->phydev) + return phy_ethtool_sset(priv->phydev, cmd); + + return -EOPNOTSUPP; +} + +static u32 sxgbe_getmsglevel(struct net_device *dev) +{ + struct sxgbe_priv_data *priv = netdev_priv(dev); + return priv->msg_enable; +} + +static void sxgbe_setmsglevel(struct net_device *dev, u32 level) +{ + struct sxgbe_priv_data *priv = netdev_priv(dev); + priv->msg_enable = level; +} + +static void sxgbe_get_strings(struct net_device *dev, u32 stringset, u8 *data) +{ + int i; + u8 *p = data; + + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < SXGBE_STATS_LEN; i++) { + memcpy(p, sxgbe_gstrings_stats[i].stat_string, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + break; + default: + WARN_ON(1); + break; + } +} + +static int sxgbe_get_sset_count(struct net_device *netdev, int sset) +{ + int len; + + switch (sset) { + case ETH_SS_STATS: + len = SXGBE_STATS_LEN; + return len; + default: + return -EINVAL; + } +} + +static void sxgbe_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *dummy, u64 *data) +{ + struct sxgbe_priv_data *priv = netdev_priv(dev); + int i; + char *p; + + if (priv->eee_enabled) { + int val = phy_get_eee_err(priv->phydev); + + if (val) + priv->xstats.eee_wakeup_error_n = val; + } + + for (i = 0; i < SXGBE_STATS_LEN; i++) { + p = (char *)priv + sxgbe_gstrings_stats[i].stat_offset; + data[i] = (sxgbe_gstrings_stats[i].sizeof_stat == sizeof(u64)) + ? 
(*(u64 *)p) : (*(u32 *)p); + } +} + +static void sxgbe_get_channels(struct net_device *dev, + struct ethtool_channels *channel) +{ + channel->max_rx = SXGBE_MAX_RX_CHANNELS; + channel->max_tx = SXGBE_MAX_TX_CHANNELS; + channel->rx_count = SXGBE_RX_QUEUES; + channel->tx_count = SXGBE_TX_QUEUES; +} + +static u32 sxgbe_riwt2usec(u32 riwt, struct sxgbe_priv_data *priv) +{ + unsigned long clk = clk_get_rate(priv->sxgbe_clk); + + if (!clk) + return 0; + + return (riwt * 256) / (clk / 1000000); +} + +static u32 sxgbe_usec2riwt(u32 usec, struct sxgbe_priv_data *priv) +{ + unsigned long clk = clk_get_rate(priv->sxgbe_clk); + + if (!clk) + return 0; + + return (usec * (clk / 1000000)) / 256; +} + +static int sxgbe_get_coalesce(struct net_device *dev, + struct ethtool_coalesce *ec) +{ + struct sxgbe_priv_data *priv = netdev_priv(dev); + + if (priv->use_riwt) + ec->rx_coalesce_usecs = sxgbe_riwt2usec(priv->rx_riwt, priv); + + return 0; +} + +static int sxgbe_set_coalesce(struct net_device *dev, + struct ethtool_coalesce *ec) +{ + struct sxgbe_priv_data *priv = netdev_priv(dev); + unsigned int rx_riwt; + + if (!ec->rx_coalesce_usecs) + return -EINVAL; + + rx_riwt = sxgbe_usec2riwt(ec->rx_coalesce_usecs, priv); + + if ((rx_riwt > SXGBE_MAX_DMA_RIWT) || (rx_riwt < SXGBE_MIN_DMA_RIWT)) + return -EINVAL; + else if (!priv->use_riwt) + return -EOPNOTSUPP; + + priv->rx_riwt = rx_riwt; + priv->hw->dma->rx_watchdog(priv->ioaddr, priv->rx_riwt); + + return 0; +} + +static int sxgbe_get_rss_hash_opts(struct sxgbe_priv_data *priv, + struct ethtool_rxnfc *cmd) +{ + cmd->data = 0; + + /* Report default options for RSS on sxgbe */ + switch (cmd->flow_type) { + case TCP_V4_FLOW: + case UDP_V4_FLOW: + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + case SCTP_V4_FLOW: + case AH_ESP_V4_FLOW: + case AH_V4_FLOW: + case ESP_V4_FLOW: + case IPV4_FLOW: + cmd->data |= RXH_IP_SRC | RXH_IP_DST; + break; + case TCP_V6_FLOW: + case UDP_V6_FLOW: + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + case SCTP_V6_FLOW: + case AH_ESP_V6_FLOW: + case AH_V6_FLOW: + case ESP_V6_FLOW: + case IPV6_FLOW: + cmd->data |= RXH_IP_SRC | RXH_IP_DST; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int sxgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, + u32 *rule_locs) +{ + struct sxgbe_priv_data *priv = netdev_priv(dev); + int ret = -EOPNOTSUPP; + + switch (cmd->cmd) { + case ETHTOOL_GRXFH: + ret = sxgbe_get_rss_hash_opts(priv, cmd); + break; + default: + break; + } + + return ret; +} + +static int sxgbe_set_rss_hash_opt(struct sxgbe_priv_data *priv, + struct ethtool_rxnfc *cmd) +{ + u32 reg_val = 0; + + /* RSS does not support anything other than hashing + * to queues on src and dst IPs and ports + */ + if (cmd->data & ~(RXH_IP_SRC | RXH_IP_DST | + RXH_L4_B_0_1 | RXH_L4_B_2_3)) + return -EINVAL; + + switch (cmd->flow_type) { + case TCP_V4_FLOW: + case TCP_V6_FLOW: + if (!(cmd->data & RXH_IP_SRC) || + !(cmd->data & RXH_IP_DST) || + !(cmd->data & RXH_L4_B_0_1) || + !(cmd->data & RXH_L4_B_2_3)) + return -EINVAL; + reg_val = SXGBE_CORE_RSS_CTL_TCP4TE; + break; + case UDP_V4_FLOW: + case UDP_V6_FLOW: + if (!(cmd->data & RXH_IP_SRC) || + !(cmd->data & RXH_IP_DST) || + !(cmd->data & RXH_L4_B_0_1) || + !(cmd->data & RXH_L4_B_2_3)) + return -EINVAL; + reg_val = SXGBE_CORE_RSS_CTL_UDP4TE; + break; + case SCTP_V4_FLOW: + case AH_ESP_V4_FLOW: + case AH_V4_FLOW: + case ESP_V4_FLOW: + case AH_ESP_V6_FLOW: + case AH_V6_FLOW: + case ESP_V6_FLOW: + case SCTP_V6_FLOW: + case IPV4_FLOW: + case IPV6_FLOW: + if (!(cmd->data & RXH_IP_SRC) || 
+ !(cmd->data & RXH_IP_DST) || + (cmd->data & RXH_L4_B_0_1) || + (cmd->data & RXH_L4_B_2_3)) + return -EINVAL; + reg_val = SXGBE_CORE_RSS_CTL_IP2TE; + break; + default: + return -EINVAL; + } + + /* Read SXGBE RSS control register and update */ + reg_val |= readl(priv->ioaddr + SXGBE_CORE_RSS_CTL_REG); + writel(reg_val, priv->ioaddr + SXGBE_CORE_RSS_CTL_REG); + readl(priv->ioaddr + SXGBE_CORE_RSS_CTL_REG); + + return 0; +} + +static int sxgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) +{ + struct sxgbe_priv_data *priv = netdev_priv(dev); + int ret = -EOPNOTSUPP; + + switch (cmd->cmd) { + case ETHTOOL_SRXFH: + ret = sxgbe_set_rss_hash_opt(priv, cmd); + break; + default: + break; + } + + return ret; +} + +static void sxgbe_get_regs(struct net_device *dev, + struct ethtool_regs *regs, void *space) +{ + struct sxgbe_priv_data *priv = netdev_priv(dev); + u32 *reg_space = (u32 *)space; + int reg_offset; + int reg_ix = 0; + void __iomem *ioaddr = priv->ioaddr; + + memset(reg_space, 0x0, REG_SPACE_SIZE); + + /* MAC registers */ + for (reg_offset = START_MAC_REG_OFFSET; + reg_offset <= MAX_MAC_REG_OFFSET; reg_offset += 4) { + reg_space[reg_ix] = readl(ioaddr + reg_offset); + reg_ix++; + } + + /* MTL registers */ + for (reg_offset = START_MTL_REG_OFFSET; + reg_offset <= MAX_MTL_REG_OFFSET; reg_offset += 4) { + reg_space[reg_ix] = readl(ioaddr + reg_offset); + reg_ix++; + } + + /* DMA registers */ + for (reg_offset = START_DMA_REG_OFFSET; + reg_offset <= MAX_DMA_REG_OFFSET; reg_offset += 4) { + reg_space[reg_ix] = readl(ioaddr + reg_offset); + reg_ix++; + } + + BUG_ON(reg_ix * 4 > REG_SPACE_SIZE); +} + +static int sxgbe_get_regs_len(struct net_device *dev) +{ + return REG_SPACE_SIZE; +} + +static const struct ethtool_ops sxgbe_ethtool_ops = { + .get_drvinfo = sxgbe_getdrvinfo, + .get_settings = sxgbe_getsettings, + .set_settings = sxgbe_setsettings, + .get_msglevel = sxgbe_getmsglevel, + .set_msglevel = sxgbe_setmsglevel, + .get_link = ethtool_op_get_link, + .get_strings = sxgbe_get_strings, + .get_ethtool_stats = sxgbe_get_ethtool_stats, + .get_sset_count = sxgbe_get_sset_count, + .get_channels = sxgbe_get_channels, + .get_coalesce = sxgbe_get_coalesce, + .set_coalesce = sxgbe_set_coalesce, + .get_rxnfc = sxgbe_get_rxnfc, + .set_rxnfc = sxgbe_set_rxnfc, + .get_regs = sxgbe_get_regs, + .get_regs_len = sxgbe_get_regs_len, + .get_eee = sxgbe_get_eee, + .set_eee = sxgbe_set_eee, +}; + +void sxgbe_set_ethtool_ops(struct net_device *netdev) +{ + SET_ETHTOOL_OPS(netdev, &sxgbe_ethtool_ops); +} diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c new file mode 100644 index 000000000000..a72688e8dc6c --- /dev/null +++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c @@ -0,0 +1,2317 @@ +/* 10G controller driver for Samsung SoCs + * + * Copyright (C) 2013 Samsung Electronics Co., Ltd. + * http://www.samsung.com + * + * Author: Siva Reddy Kallam <siva.kallam@samsung.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/clk.h>
+#include <linux/crc32.h>
+#include <linux/dma-mapping.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/if.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/ip.h>
+#include <linux/kernel.h>
+#include <linux/mii.h>
+#include <linux/module.h>
+#include <linux/net_tstamp.h>
+#include <linux/netdevice.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/prefetch.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/tcp.h>
+#include <linux/sxgbe_platform.h>
+
+#include "sxgbe_common.h"
+#include "sxgbe_desc.h"
+#include "sxgbe_dma.h"
+#include "sxgbe_mtl.h"
+#include "sxgbe_reg.h"
+
+#define SXGBE_ALIGN(x)	L1_CACHE_ALIGN(x)
+#define JUMBO_LEN	9000
+
+/* Module parameters */
+#define TX_TIMEO	5000
+#define DMA_TX_SIZE	512
+#define DMA_RX_SIZE	1024
+#define TC_DEFAULT	64
+#define DMA_BUFFER_SIZE	BUF_SIZE_2KiB
+/* The default timer value as per the sxgbe specification 1 sec (1000 ms) */
+#define SXGBE_DEFAULT_LPI_TIMER	1000
+
+static int debug = -1;
+static int eee_timer = SXGBE_DEFAULT_LPI_TIMER;
+
+module_param(eee_timer, int, S_IRUGO | S_IWUSR);
+
+module_param(debug, int, S_IRUGO | S_IWUSR);
+static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
+				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
+				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
+
+static irqreturn_t sxgbe_common_interrupt(int irq, void *dev_id);
+static irqreturn_t sxgbe_tx_interrupt(int irq, void *dev_id);
+static irqreturn_t sxgbe_rx_interrupt(int irq, void *dev_id);
+
+#define SXGBE_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))
+
+#define SXGBE_LPI_TIMER(x) (jiffies + msecs_to_jiffies(x))
+
+/**
+ * sxgbe_verify_args - verify the driver parameters.
+ * Description: it verifies whether any wrong parameter was passed to the
+ * driver. Note that wrong parameters are replaced with the default values.
+ */
+static void sxgbe_verify_args(void)
+{
+	if (unlikely(eee_timer < 0))
+		eee_timer = SXGBE_DEFAULT_LPI_TIMER;
+}
+
+static void sxgbe_enable_eee_mode(const struct sxgbe_priv_data *priv)
+{
+	/* Check and enter in LPI mode */
+	if (!priv->tx_path_in_lpi_mode)
+		priv->hw->mac->set_eee_mode(priv->ioaddr);
+}
+
+void sxgbe_disable_eee_mode(struct sxgbe_priv_data * const priv)
+{
+	/* Exit and disable EEE mode in case we are in LPI state. */
+	priv->hw->mac->reset_eee_mode(priv->ioaddr);
+	del_timer_sync(&priv->eee_ctrl_timer);
+	priv->tx_path_in_lpi_mode = false;
+}
+
+/**
+ * sxgbe_eee_ctrl_timer
+ * @arg : data hook
+ * Description:
+ * If there is no data transfer and we are not already in LPI state,
+ * then the MAC transmitter can be moved to LPI state.
+ */
+static void sxgbe_eee_ctrl_timer(unsigned long arg)
+{
+	struct sxgbe_priv_data *priv = (struct sxgbe_priv_data *)arg;
+
+	sxgbe_enable_eee_mode(priv);
+	mod_timer(&priv->eee_ctrl_timer, SXGBE_LPI_TIMER(eee_timer));
+}
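sxgbe_eee_ctrl_timer() and sxgbe_disable_eee_mode() together form a deferred-idle pattern: TX activity takes the MAC out of LPI and deletes the timer, TX completions re-arm it, and only when a full eee_timer window elapses with no re-arm does the callback drop the transmitter into LPI. Condensed to its shape (pre-timer_setup() timer API, as used by this driver; all names are illustrative):

/* Deferred-idle timer sketch -- illustration only. */
#include <linux/timer.h>
#include <linux/jiffies.h>

static struct timer_list idle_timer;

static void idle_timeout(unsigned long arg)
{
	/* Quiet for a whole window: enter the low-power state here. */
}

static void idle_timer_setup(void)
{
	init_timer(&idle_timer);
	idle_timer.function = idle_timeout;
	idle_timer.data = 0UL;
}

static void on_tx_activity(void)
{
	/* Each event pushes the deadline a full window into the future. */
	mod_timer(&idle_timer, jiffies + msecs_to_jiffies(1000));
}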
+
+/**
+ * sxgbe_eee_init
+ * @priv: private device pointer
+ * Description:
+ * If EEE support was enabled while configuring the driver, and if the
+ * MAC actually supports EEE (from the HW capability register) and the
+ * PHY can manage it as well, then enable the LPI state and start the
+ * timer to verify whether the TX path can enter the LPI state.
+ */
+bool sxgbe_eee_init(struct sxgbe_priv_data * const priv)
+{
+	bool ret = false;
+
+	/* MAC core supports the EEE feature. */
+	if (priv->hw_cap.eee) {
+		/* Check if the PHY supports EEE */
+		if (phy_init_eee(priv->phydev, 1))
+			return false;
+
+		priv->eee_active = 1;
+		init_timer(&priv->eee_ctrl_timer);
+		priv->eee_ctrl_timer.function = sxgbe_eee_ctrl_timer;
+		priv->eee_ctrl_timer.data = (unsigned long)priv;
+		priv->eee_ctrl_timer.expires = SXGBE_LPI_TIMER(eee_timer);
+		add_timer(&priv->eee_ctrl_timer);
+
+		priv->hw->mac->set_eee_timer(priv->ioaddr,
+					     SXGBE_DEFAULT_LPI_TIMER,
+					     priv->tx_lpi_timer);
+
+		pr_info("Energy-Efficient Ethernet initialized\n");
+
+		ret = true;
+	}
+
+	return ret;
+}
+
+static void sxgbe_eee_adjust(const struct sxgbe_priv_data *priv)
+{
+	/* When EEE has already been initialised we have to modify the
+	 * PLS bit in the LPI ctrl & status reg according to the PHY
+	 * link status.
+	 */
+	if (priv->eee_enabled)
+		priv->hw->mac->set_eee_pls(priv->ioaddr, priv->phydev->link);
+}
+
+/**
+ * sxgbe_clk_csr_set - dynamically set the MDC clock
+ * @priv: driver private structure
+ * Description: this is to dynamically set the MDC clock according to the
+ * csr clock input.
+ */
+static void sxgbe_clk_csr_set(struct sxgbe_priv_data *priv)
+{
+	u32 clk_rate = clk_get_rate(priv->sxgbe_clk);
+
+	/* assign the proper divider, this will be used during
+	 * mdio communication
+	 */
+	if (clk_rate < SXGBE_CSR_F_150M)
+		priv->clk_csr = SXGBE_CSR_100_150M;
+	else if (clk_rate <= SXGBE_CSR_F_250M)
+		priv->clk_csr = SXGBE_CSR_150_250M;
+	else if (clk_rate <= SXGBE_CSR_F_300M)
+		priv->clk_csr = SXGBE_CSR_250_300M;
+	else if (clk_rate <= SXGBE_CSR_F_350M)
+		priv->clk_csr = SXGBE_CSR_300_350M;
+	else if (clk_rate <= SXGBE_CSR_F_400M)
+		priv->clk_csr = SXGBE_CSR_350_400M;
+	else if (clk_rate <= SXGBE_CSR_F_500M)
+		priv->clk_csr = SXGBE_CSR_400_500M;
+}
+
+/* minimum number of free TX descriptors required to wake up TX process */
+#define SXGBE_TX_THRESH(x)	(x->dma_tx_size / 4)
+
+static inline u32 sxgbe_tx_avail(struct sxgbe_tx_queue *queue, int tx_qsize)
+{
+	return queue->dirty_tx + tx_qsize - queue->cur_tx - 1;
+}
+
+/**
+ * sxgbe_adjust_link
+ * @dev: net device structure
+ * Description: it adjusts the link parameters.
+ */
+static void sxgbe_adjust_link(struct net_device *dev)
+{
+	struct sxgbe_priv_data *priv = netdev_priv(dev);
+	struct phy_device *phydev = priv->phydev;
+	u8 new_state = 0;
+	u8 speed = 0xff;
+
+	if (!phydev)
+		return;
+
+	/* SXGBE does not support auto-negotiation or half-duplex mode,
+	 * so duplex changes are not handled here; only speed and link
+	 * status are.
+	 */
+	if (phydev->link) {
+		if (phydev->speed != priv->speed) {
+			new_state = 1;
+			switch (phydev->speed) {
+			case SPEED_10000:
+				speed = SXGBE_SPEED_10G;
+				break;
+			case SPEED_2500:
+				speed = SXGBE_SPEED_2_5G;
+				break;
+			case SPEED_1000:
+				speed = SXGBE_SPEED_1G;
+				break;
+			default:
+				netif_err(priv, link, dev,
+					  "Speed (%d) not supported\n",
+					  phydev->speed);
+			}
+
+			priv->speed = phydev->speed;
+			priv->hw->mac->set_speed(priv->ioaddr, speed);
+		}
+
+		if (!priv->oldlink) {
+			new_state = 1;
+			priv->oldlink = 1;
+		}
+	} else if (priv->oldlink) {
+		new_state = 1;
+		priv->oldlink = 0;
+		priv->speed = SPEED_UNKNOWN;
+	}
+
+	if (new_state && netif_msg_link(priv))
+		phy_print_status(phydev);
+
+	/* Alter the MAC settings for EEE */
+	sxgbe_eee_adjust(priv);
+}
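sxgbe_tx_avail() above relies on cur_tx and dirty_tx being free-running unsigned counters that are reduced modulo the ring size only when a slot is indexed; 32-bit unsigned wraparound keeps the subtraction exact. A standalone check of the arithmetic with made-up numbers (plain user-space C, illustration only, assuming 32-bit unsigned int):

/* Ring-occupancy arithmetic check -- illustration only. */
#include <stdio.h>

static unsigned int tx_avail(unsigned int dirty, unsigned int cur,
			     unsigned int qsize)
{
	return dirty + qsize - cur - 1;
}

int main(void)
{
	/* 512-entry ring, 5 descriptors in flight: 506 free slots. */
	printf("%u\n", tx_avail(1000, 1005, 512));
	/* Same answer when the 32-bit counters wrap around zero. */
	printf("%u\n", tx_avail(0xfffffffeu, 3u, 512));
	return 0;
}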
+
+/**
+ * sxgbe_init_phy - PHY initialization
+ * @ndev: net device structure
+ * Description: it initializes the driver's PHY state and attaches the
+ * PHY to the MAC driver.
+ * Return value:
+ * 0 on success
+ */
+static int sxgbe_init_phy(struct net_device *ndev)
+{
+	char phy_id_fmt[MII_BUS_ID_SIZE + 3];
+	char bus_id[MII_BUS_ID_SIZE];
+	struct phy_device *phydev;
+	struct sxgbe_priv_data *priv = netdev_priv(ndev);
+	int phy_iface = priv->plat->interface;
+
+	/* assign default link status */
+	priv->oldlink = 0;
+	priv->speed = SPEED_UNKNOWN;
+	priv->oldduplex = DUPLEX_UNKNOWN;
+
+	if (priv->plat->phy_bus_name)
+		snprintf(bus_id, MII_BUS_ID_SIZE, "%s-%x",
+			 priv->plat->phy_bus_name, priv->plat->bus_id);
+	else
+		snprintf(bus_id, MII_BUS_ID_SIZE, "sxgbe-%x",
+			 priv->plat->bus_id);
+
+	snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
+		 priv->plat->phy_addr);
+	netdev_dbg(ndev, "%s: trying to attach to %s\n", __func__, phy_id_fmt);
+
+	phydev = phy_connect(ndev, phy_id_fmt, &sxgbe_adjust_link, phy_iface);
+
+	if (IS_ERR(phydev)) {
+		netdev_err(ndev, "Could not attach to PHY\n");
+		return PTR_ERR(phydev);
+	}
+
+	/* Stop advertising 1000BASE capability if interface is not GMII */
+	if ((phy_iface == PHY_INTERFACE_MODE_MII) ||
+	    (phy_iface == PHY_INTERFACE_MODE_RMII))
+		phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
+					 SUPPORTED_1000baseT_Full);
+	if (phydev->phy_id == 0) {
+		phy_disconnect(phydev);
+		return -ENODEV;
+	}
+
+	netdev_dbg(ndev, "%s: attached to PHY (UID 0x%x) Link = %d\n",
+		   __func__, phydev->phy_id, phydev->link);
+
+	/* save phy device in private structure */
+	priv->phydev = phydev;
+
+	return 0;
+}
+
+/**
+ * sxgbe_clear_descriptors: clear descriptors
+ * @priv: driver private structure
+ * Description: this function is called to clear the TX and RX descriptors
+ * in case both basic and extended descriptors are used.
+ */
+static void sxgbe_clear_descriptors(struct sxgbe_priv_data *priv)
+{
+	int i, j;
+	unsigned int txsize = priv->dma_tx_size;
+	unsigned int rxsize = priv->dma_rx_size;
+
+	/* Clear the Rx/Tx descriptors */
+	for (j = 0; j < SXGBE_RX_QUEUES; j++) {
+		for (i = 0; i < rxsize; i++)
+			priv->hw->desc->init_rx_desc(&priv->rxq[j]->dma_rx[i],
+						     priv->use_riwt, priv->mode,
+						     (i == rxsize - 1));
+	}
+
+	for (j = 0; j < SXGBE_TX_QUEUES; j++) {
+		for (i = 0; i < txsize; i++)
+			priv->hw->desc->init_tx_desc(&priv->txq[j]->dma_tx[i]);
+	}
+}
+
+static int sxgbe_init_rx_buffers(struct net_device *dev,
+				 struct sxgbe_rx_norm_desc *p, int i,
+				 unsigned int dma_buf_sz,
+				 struct sxgbe_rx_queue *rx_ring)
+{
+	struct sxgbe_priv_data *priv = netdev_priv(dev);
+	struct sk_buff *skb;
+
+	skb = __netdev_alloc_skb_ip_align(dev, dma_buf_sz, GFP_KERNEL);
+	if (!skb)
+		return -ENOMEM;
+
+	rx_ring->rx_skbuff[i] = skb;
+	rx_ring->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
+						   dma_buf_sz, DMA_FROM_DEVICE);
+
+	if (dma_mapping_error(priv->device, rx_ring->rx_skbuff_dma[i])) {
+		netdev_err(dev, "%s: DMA mapping error\n", __func__);
+		dev_kfree_skb_any(skb);
+		return -EINVAL;
+	}
+
+	p->rdes23.rx_rd_des23.buf2_addr = rx_ring->rx_skbuff_dma[i];
+
+	return 0;
+}
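Every descriptor touched by the helpers above ultimately takes part in the OWN-bit handshake that the hw->desc callbacks (set_tx_owner, get_tx_owner, set_rx_owner, ...) encapsulate: the CPU fills a descriptor and flips ownership to the DMA engine, and reclaims it only after the hardware has cleared the bit. Schematically, with made-up field and helper names rather than the driver's real descriptor layout:

/* OWN-bit handshake sketch -- names are illustrative. */
struct fake_desc {
	unsigned long long buf_addr;
	unsigned int own;	/* 1: owned by DMA, 0: owned by CPU */
};

static void cpu_gives_to_hw(struct fake_desc *d, unsigned long long addr)
{
	d->buf_addr = addr;	/* publish the payload details first... */
	/* real code issues wmb() here so the writes land before OWN */
	d->own = 1;		/* ...then hand the slot to the DMA */
}

static int cpu_may_reclaim(const struct fake_desc *d)
{
	return d->own == 0;	/* hardware clears OWN on completion */
}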
+
+/**
+ * init_tx_ring - init the TX descriptor ring
+ * @dev: device structure
+ * @queue_no: queue number
+ * @tx_ring: ring to be initialised
+ * @tx_rsize: ring size
+ * Description: this function initializes the DMA TX descriptor ring
+ */
+static int init_tx_ring(struct device *dev, u8 queue_no,
+			struct sxgbe_tx_queue *tx_ring, int tx_rsize)
+{
+	/* TX ring is not allocated */
+	if (!tx_ring) {
+		dev_err(dev, "No memory for TX queue of SXGBE\n");
+		return -ENOMEM;
+	}
+
+	/* allocate memory for TX descriptors */
+	tx_ring->dma_tx = dma_zalloc_coherent(dev,
+					      tx_rsize * sizeof(struct sxgbe_tx_norm_desc),
+					      &tx_ring->dma_tx_phy, GFP_KERNEL);
+	if (!tx_ring->dma_tx)
+		return -ENOMEM;
+
+	/* allocate memory for TX skbuff array */
+	tx_ring->tx_skbuff_dma = devm_kcalloc(dev, tx_rsize,
+					      sizeof(dma_addr_t), GFP_KERNEL);
+	if (!tx_ring->tx_skbuff_dma)
+		goto dmamem_err;
+
+	tx_ring->tx_skbuff = devm_kcalloc(dev, tx_rsize,
+					  sizeof(struct sk_buff *), GFP_KERNEL);
+
+	if (!tx_ring->tx_skbuff)
+		goto dmamem_err;
+
+	/* assign queue number */
+	tx_ring->queue_no = queue_no;
+
+	/* initialise counters */
+	tx_ring->dirty_tx = 0;
+	tx_ring->cur_tx = 0;
+
+	/* initialise TX queue lock */
+	spin_lock_init(&tx_ring->tx_lock);
+
+	return 0;
+
+dmamem_err:
+	dma_free_coherent(dev, tx_rsize * sizeof(struct sxgbe_tx_norm_desc),
+			  tx_ring->dma_tx, tx_ring->dma_tx_phy);
+	return -ENOMEM;
+}
+
+/**
+ * free_rx_ring - free the RX descriptor ring
+ * @dev: device structure
+ * @rx_ring: ring to be freed
+ * @rx_rsize: ring size
+ * Description: this function frees the DMA RX descriptor ring and the
+ * bookkeeping arrays that go with it
+ */
+void free_rx_ring(struct device *dev, struct sxgbe_rx_queue *rx_ring,
+		  int rx_rsize)
+{
+	dma_free_coherent(dev, rx_rsize * sizeof(struct sxgbe_rx_norm_desc),
+			  rx_ring->dma_rx, rx_ring->dma_rx_phy);
+	kfree(rx_ring->rx_skbuff_dma);
+	kfree(rx_ring->rx_skbuff);
+}
+
+/**
+ * init_rx_ring - init the RX descriptor ring
+ * @dev: net device structure
+ * @queue_no: queue number
+ * @rx_ring: ring to be initialised
+ * @rx_rsize: ring size
+ * Description: this function initializes the DMA RX descriptor ring
+ */
+static int init_rx_ring(struct net_device *dev, u8 queue_no,
+			struct sxgbe_rx_queue *rx_ring, int rx_rsize)
+{
+	struct sxgbe_priv_data *priv = netdev_priv(dev);
+	int desc_index;
+	unsigned int bfsize = 0;
+	int ret = 0;
+
+	/* Set the max buffer size according to the MTU. */
+	bfsize = ALIGN(dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN, 8);
+
+	netif_dbg(priv, probe, dev, "%s: bfsize %d\n", __func__, bfsize);
+
+	/* RX ring is not allocated */
+	if (rx_ring == NULL) {
+		netdev_err(dev, "No memory for RX queue\n");
+		goto error;
+	}
+
+	/* assign queue number */
+	rx_ring->queue_no = queue_no;
+
+	/* allocate memory for RX descriptors */
+	rx_ring->dma_rx = dma_zalloc_coherent(priv->device,
+					      rx_rsize * sizeof(struct sxgbe_rx_norm_desc),
+					      &rx_ring->dma_rx_phy, GFP_KERNEL);
+
+	if (rx_ring->dma_rx == NULL)
+		goto error;
+
+	/* allocate memory for RX skbuff array */
+	rx_ring->rx_skbuff_dma = kmalloc_array(rx_rsize,
+					       sizeof(dma_addr_t), GFP_KERNEL);
+	if (rx_ring->rx_skbuff_dma == NULL)
+		goto dmamem_err;
+
+	rx_ring->rx_skbuff = kmalloc_array(rx_rsize,
+					   sizeof(struct sk_buff *), GFP_KERNEL);
+	if (rx_ring->rx_skbuff == NULL)
+		goto rxbuff_err;
+
+	/* initialise the buffers */
+	for (desc_index = 0; desc_index < rx_rsize; desc_index++) {
+		struct sxgbe_rx_norm_desc *p;
+		p = rx_ring->dma_rx + desc_index;
+		ret = sxgbe_init_rx_buffers(dev, p, desc_index,
+					    bfsize, rx_ring);
+		if (ret)
+			goto err_init_rx_buffers;
+	}
+
+	/* initialise counters */
+	rx_ring->cur_rx = 0;
+	rx_ring->dirty_rx = (unsigned int)(desc_index - rx_rsize);
+	priv->dma_buf_sz = bfsize;
+
+	return 0;
+
+err_init_rx_buffers:
+	/* unwind only the buffers mapped so far; the descriptor ring
+	 * itself and the bookkeeping arrays are freed below
+	 */
+	while (--desc_index >= 0) {
+		dma_unmap_single(priv->device,
+				 rx_ring->rx_skbuff_dma[desc_index],
+				 bfsize, DMA_FROM_DEVICE);
+		dev_kfree_skb_any(rx_ring->rx_skbuff[desc_index]);
+	}
+	kfree(rx_ring->rx_skbuff);
+rxbuff_err:
+	kfree(rx_ring->rx_skbuff_dma);
+dmamem_err:
+	dma_free_coherent(priv->device,
+			  rx_rsize * sizeof(struct sxgbe_rx_norm_desc),
+			  rx_ring->dma_rx, rx_ring->dma_rx_phy);
+error:
+	return -ENOMEM;
+}
+
+/**
+ * free_tx_ring - free the TX descriptor ring
+ * @dev: device structure
+ * @tx_ring: ring to be freed
+ * @tx_rsize: ring size
+ * Description: this function frees the DMA TX descriptor ring memory
+ */
+void free_tx_ring(struct device *dev, struct sxgbe_tx_queue *tx_ring,
+		  int tx_rsize)
+{
+	dma_free_coherent(dev, tx_rsize * sizeof(struct sxgbe_tx_norm_desc),
+			  tx_ring->dma_tx, tx_ring->dma_tx_phy);
+}
+
+/**
+ * init_dma_desc_rings - init the RX/TX descriptor rings
+ * @netd: net device structure
+ * Description: this function initializes the DMA RX/TX descriptors
+ * and allocates the socket buffers. It supports the chained and ring
+ * modes.
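+ * TX rings for all queues are set up first, then the RX rings; on a
+ * failure the rings already allocated inside the failing loop are
+ * freed again in reverse order before the error is returned.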
+ */ +static int init_dma_desc_rings(struct net_device *netd) +{ + int queue_num, ret; + struct sxgbe_priv_data *priv = netdev_priv(netd); + int tx_rsize = priv->dma_tx_size; + int rx_rsize = priv->dma_rx_size; + + /* Allocate memory for queue structures and TX descs */ + SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) { + ret = init_tx_ring(priv->device, queue_num, + priv->txq[queue_num], tx_rsize); + if (ret) { + dev_err(&netd->dev, "TX DMA ring allocation failed!\n"); + goto txalloc_err; + } + + /* save private pointer in each ring this + * pointer is needed during cleaing TX queue + */ + priv->txq[queue_num]->priv_ptr = priv; + } + + /* Allocate memory for queue structures and RX descs */ + SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, queue_num) { + ret = init_rx_ring(netd, queue_num, + priv->rxq[queue_num], rx_rsize); + if (ret) { + netdev_err(netd, "RX DMA ring allocation failed!!\n"); + goto rxalloc_err; + } + + /* save private pointer in each ring this + * pointer is needed during cleaing TX queue + */ + priv->rxq[queue_num]->priv_ptr = priv; + } + + sxgbe_clear_descriptors(priv); + + return 0; + +txalloc_err: + while (queue_num--) + free_tx_ring(priv->device, priv->txq[queue_num], tx_rsize); + return ret; + +rxalloc_err: + while (queue_num--) + free_rx_ring(priv->device, priv->rxq[queue_num], rx_rsize); + return ret; +} + +static void tx_free_ring_skbufs(struct sxgbe_tx_queue *txqueue) +{ + int dma_desc; + struct sxgbe_priv_data *priv = txqueue->priv_ptr; + int tx_rsize = priv->dma_tx_size; + + for (dma_desc = 0; dma_desc < tx_rsize; dma_desc++) { + struct sxgbe_tx_norm_desc *tdesc = txqueue->dma_tx + dma_desc; + + if (txqueue->tx_skbuff_dma[dma_desc]) + dma_unmap_single(priv->device, + txqueue->tx_skbuff_dma[dma_desc], + priv->hw->desc->get_tx_len(tdesc), + DMA_TO_DEVICE); + + dev_kfree_skb_any(txqueue->tx_skbuff[dma_desc]); + txqueue->tx_skbuff[dma_desc] = NULL; + txqueue->tx_skbuff_dma[dma_desc] = 0; + } +} + + +static void dma_free_tx_skbufs(struct sxgbe_priv_data *priv) +{ + int queue_num; + + SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) { + struct sxgbe_tx_queue *tqueue = priv->txq[queue_num]; + tx_free_ring_skbufs(tqueue); + } +} + +static void free_dma_desc_resources(struct sxgbe_priv_data *priv) +{ + int queue_num; + int tx_rsize = priv->dma_tx_size; + int rx_rsize = priv->dma_rx_size; + + /* Release the DMA TX buffers */ + dma_free_tx_skbufs(priv); + + /* Release the TX ring memory also */ + SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) { + free_tx_ring(priv->device, priv->txq[queue_num], tx_rsize); + } + + /* Release the RX ring memory also */ + SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, queue_num) { + free_rx_ring(priv->device, priv->rxq[queue_num], rx_rsize); + } +} + +static int txring_mem_alloc(struct sxgbe_priv_data *priv) +{ + int queue_num; + + SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) { + priv->txq[queue_num] = devm_kmalloc(priv->device, + sizeof(struct sxgbe_tx_queue), GFP_KERNEL); + if (!priv->txq[queue_num]) + return -ENOMEM; + } + + return 0; +} + +static int rxring_mem_alloc(struct sxgbe_priv_data *priv) +{ + int queue_num; + + SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, queue_num) { + priv->rxq[queue_num] = devm_kmalloc(priv->device, + sizeof(struct sxgbe_rx_queue), GFP_KERNEL); + if (!priv->rxq[queue_num]) + return -ENOMEM; + } + + return 0; +} + +/** + * sxgbe_mtl_operation_mode - HW MTL operation mode + * @priv: driver private structure + * Description: it sets the MTL operation mode: tx/rx MTL thresholds + * or Store-And-Forward capability. 
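+ * When the platform forces store-and-forward (force_sf_dma_mode), all
+ * TX and RX queues are put in SF mode; with force_thresh_dma_mode the
+ * per-queue thresholds tx_tc/rx_tc are programmed instead.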
+ */ +static void sxgbe_mtl_operation_mode(struct sxgbe_priv_data *priv) +{ + int queue_num; + + /* TX/RX threshold control */ + if (likely(priv->plat->force_sf_dma_mode)) { + /* set TC mode for TX QUEUES */ + SXGBE_FOR_EACH_QUEUE(priv->hw_cap.tx_mtl_queues, queue_num) + priv->hw->mtl->set_tx_mtl_mode(priv->ioaddr, queue_num, + SXGBE_MTL_SFMODE); + priv->tx_tc = SXGBE_MTL_SFMODE; + + /* set TC mode for RX QUEUES */ + SXGBE_FOR_EACH_QUEUE(priv->hw_cap.rx_mtl_queues, queue_num) + priv->hw->mtl->set_rx_mtl_mode(priv->ioaddr, queue_num, + SXGBE_MTL_SFMODE); + priv->rx_tc = SXGBE_MTL_SFMODE; + } else if (unlikely(priv->plat->force_thresh_dma_mode)) { + /* set TC mode for TX QUEUES */ + SXGBE_FOR_EACH_QUEUE(priv->hw_cap.tx_mtl_queues, queue_num) + priv->hw->mtl->set_tx_mtl_mode(priv->ioaddr, queue_num, + priv->tx_tc); + /* set TC mode for RX QUEUES */ + SXGBE_FOR_EACH_QUEUE(priv->hw_cap.rx_mtl_queues, queue_num) + priv->hw->mtl->set_rx_mtl_mode(priv->ioaddr, queue_num, + priv->rx_tc); + } else { + pr_err("ERROR: %s: Invalid TX threshold mode\n", __func__); + } +} + +/** + * sxgbe_tx_queue_clean: + * @priv: driver private structure + * Description: it reclaims resources after transmission completes. + */ +static void sxgbe_tx_queue_clean(struct sxgbe_tx_queue *tqueue) +{ + struct sxgbe_priv_data *priv = tqueue->priv_ptr; + unsigned int tx_rsize = priv->dma_tx_size; + struct netdev_queue *dev_txq; + u8 queue_no = tqueue->queue_no; + + dev_txq = netdev_get_tx_queue(priv->dev, queue_no); + + spin_lock(&tqueue->tx_lock); + + priv->xstats.tx_clean++; + while (tqueue->dirty_tx != tqueue->cur_tx) { + unsigned int entry = tqueue->dirty_tx % tx_rsize; + struct sk_buff *skb = tqueue->tx_skbuff[entry]; + struct sxgbe_tx_norm_desc *p; + + p = tqueue->dma_tx + entry; + + /* Check if the descriptor is owned by the DMA. */ + if (priv->hw->desc->get_tx_owner(p)) + break; + + if (netif_msg_tx_done(priv)) + pr_debug("%s: curr %d, dirty %d\n", + __func__, tqueue->cur_tx, tqueue->dirty_tx); + + if (likely(tqueue->tx_skbuff_dma[entry])) { + dma_unmap_single(priv->device, + tqueue->tx_skbuff_dma[entry], + priv->hw->desc->get_tx_len(p), + DMA_TO_DEVICE); + tqueue->tx_skbuff_dma[entry] = 0; + } + + if (likely(skb)) { + dev_kfree_skb(skb); + tqueue->tx_skbuff[entry] = NULL; + } + + priv->hw->desc->release_tx_desc(p); + + tqueue->dirty_tx++; + } + + /* wake up queue */ + if (unlikely(netif_tx_queue_stopped(dev_txq) && + sxgbe_tx_avail(tqueue, tx_rsize) > SXGBE_TX_THRESH(priv))) { + netif_tx_lock(priv->dev); + if (netif_tx_queue_stopped(dev_txq) && + sxgbe_tx_avail(tqueue, tx_rsize) > SXGBE_TX_THRESH(priv)) { + if (netif_msg_tx_done(priv)) + pr_debug("%s: restart transmit\n", __func__); + netif_tx_wake_queue(dev_txq); + } + netif_tx_unlock(priv->dev); + } + + spin_unlock(&tqueue->tx_lock); +} + +/** + * sxgbe_tx_clean: + * @priv: driver private structure + * Description: it reclaims resources after transmission completes. + */ +static void sxgbe_tx_all_clean(struct sxgbe_priv_data * const priv) +{ + u8 queue_num; + + SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) { + struct sxgbe_tx_queue *tqueue = priv->txq[queue_num]; + + sxgbe_tx_queue_clean(tqueue); + } + + if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) { + sxgbe_enable_eee_mode(priv); + mod_timer(&priv->eee_ctrl_timer, SXGBE_LPI_TIMER(eee_timer)); + } +} + +/** + * sxgbe_restart_tx_queue: irq tx error mng function + * @priv: driver private structure + * Description: it cleans the descriptors and restarts the transmission + * in case of errors. 
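+ * The sequence is: stop the netdev TX queue, stop the TX DMA channel,
+ * free the queued skbs, reset the ring indices, restart the DMA
+ * channel and wake the netdev queue up again.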
+ */ +static void sxgbe_restart_tx_queue(struct sxgbe_priv_data *priv, int queue_num) +{ + struct sxgbe_tx_queue *tx_ring = priv->txq[queue_num]; + struct netdev_queue *dev_txq = netdev_get_tx_queue(priv->dev, + queue_num); + + /* stop the queue */ + netif_tx_stop_queue(dev_txq); + + /* stop the tx dma */ + priv->hw->dma->stop_tx_queue(priv->ioaddr, queue_num); + + /* free the skbuffs of the ring */ + tx_free_ring_skbufs(tx_ring); + + /* initalise counters */ + tx_ring->cur_tx = 0; + tx_ring->dirty_tx = 0; + + /* start the tx dma */ + priv->hw->dma->start_tx_queue(priv->ioaddr, queue_num); + + priv->dev->stats.tx_errors++; + + /* wakeup the queue */ + netif_tx_wake_queue(dev_txq); +} + +/** + * sxgbe_reset_all_tx_queues: irq tx error mng function + * @priv: driver private structure + * Description: it cleans all the descriptors and + * restarts the transmission on all queues in case of errors. + */ +static void sxgbe_reset_all_tx_queues(struct sxgbe_priv_data *priv) +{ + int queue_num; + + /* On TX timeout of net device, resetting of all queues + * may not be proper way, revisit this later if needed + */ + SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) + sxgbe_restart_tx_queue(priv, queue_num); +} + +/** + * sxgbe_get_hw_features: get XMAC capabilities from the HW cap. register. + * @priv: driver private structure + * Description: + * new GMAC chip generations have a new register to indicate the + * presence of the optional feature/functions. + * This can be also used to override the value passed through the + * platform and necessary for old MAC10/100 and GMAC chips. + */ +static int sxgbe_get_hw_features(struct sxgbe_priv_data * const priv) +{ + int rval = 0; + struct sxgbe_hw_features *features = &priv->hw_cap; + + /* Read First Capability Register CAP[0] */ + rval = priv->hw->mac->get_hw_feature(priv->ioaddr, 0); + if (rval) { + features->pmt_remote_wake_up = + SXGBE_HW_FEAT_PMT_TEMOTE_WOP(rval); + features->pmt_magic_frame = SXGBE_HW_FEAT_PMT_MAGIC_PKT(rval); + features->atime_stamp = SXGBE_HW_FEAT_IEEE1500_2008(rval); + features->tx_csum_offload = + SXGBE_HW_FEAT_TX_CSUM_OFFLOAD(rval); + features->rx_csum_offload = + SXGBE_HW_FEAT_RX_CSUM_OFFLOAD(rval); + features->multi_macaddr = SXGBE_HW_FEAT_MACADDR_COUNT(rval); + features->tstamp_srcselect = SXGBE_HW_FEAT_TSTMAP_SRC(rval); + features->sa_vlan_insert = SXGBE_HW_FEAT_SRCADDR_VLAN(rval); + features->eee = SXGBE_HW_FEAT_EEE(rval); + } + + /* Read First Capability Register CAP[1] */ + rval = priv->hw->mac->get_hw_feature(priv->ioaddr, 1); + if (rval) { + features->rxfifo_size = SXGBE_HW_FEAT_RX_FIFO_SIZE(rval); + features->txfifo_size = SXGBE_HW_FEAT_TX_FIFO_SIZE(rval); + features->atstmap_hword = SXGBE_HW_FEAT_TX_FIFO_SIZE(rval); + features->dcb_enable = SXGBE_HW_FEAT_DCB(rval); + features->splithead_enable = SXGBE_HW_FEAT_SPLIT_HDR(rval); + features->tcpseg_offload = SXGBE_HW_FEAT_TSO(rval); + features->debug_mem = SXGBE_HW_FEAT_DEBUG_MEM_IFACE(rval); + features->rss_enable = SXGBE_HW_FEAT_RSS(rval); + features->hash_tsize = SXGBE_HW_FEAT_HASH_TABLE_SIZE(rval); + features->l3l4_filer_size = SXGBE_HW_FEAT_L3L4_FILTER_NUM(rval); + } + + /* Read First Capability Register CAP[2] */ + rval = priv->hw->mac->get_hw_feature(priv->ioaddr, 2); + if (rval) { + features->rx_mtl_queues = SXGBE_HW_FEAT_RX_MTL_QUEUES(rval); + features->tx_mtl_queues = SXGBE_HW_FEAT_TX_MTL_QUEUES(rval); + features->rx_dma_channels = SXGBE_HW_FEAT_RX_DMA_CHANNELS(rval); + features->tx_dma_channels = SXGBE_HW_FEAT_TX_DMA_CHANNELS(rval); + features->pps_output_count 
= SXGBE_HW_FEAT_PPS_OUTPUTS(rval); + features->aux_input_count = SXGBE_HW_FEAT_AUX_SNAPSHOTS(rval); + } + + return rval; +} + +/** + * sxgbe_check_ether_addr: check if the MAC addr is valid + * @priv: driver private structure + * Description: + * it is to verify if the MAC address is valid, in case of failures it + * generates a random MAC address + */ +static void sxgbe_check_ether_addr(struct sxgbe_priv_data *priv) +{ + if (!is_valid_ether_addr(priv->dev->dev_addr)) { + priv->hw->mac->get_umac_addr((void __iomem *) + priv->ioaddr, + priv->dev->dev_addr, 0); + if (!is_valid_ether_addr(priv->dev->dev_addr)) + eth_hw_addr_random(priv->dev); + } + dev_info(priv->device, "device MAC address %pM\n", + priv->dev->dev_addr); +} + +/** + * sxgbe_init_dma_engine: DMA init. + * @priv: driver private structure + * Description: + * It inits the DMA invoking the specific SXGBE callback. + * Some DMA parameters can be passed from the platform; + * in case of these are not passed a default is kept for the MAC or GMAC. + */ +static int sxgbe_init_dma_engine(struct sxgbe_priv_data *priv) +{ + int pbl = DEFAULT_DMA_PBL, fixed_burst = 0, burst_map = 0; + int queue_num; + + if (priv->plat->dma_cfg) { + pbl = priv->plat->dma_cfg->pbl; + fixed_burst = priv->plat->dma_cfg->fixed_burst; + burst_map = priv->plat->dma_cfg->burst_map; + } + + SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) + priv->hw->dma->cha_init(priv->ioaddr, queue_num, + fixed_burst, pbl, + (priv->txq[queue_num])->dma_tx_phy, + (priv->rxq[queue_num])->dma_rx_phy, + priv->dma_tx_size, priv->dma_rx_size); + + return priv->hw->dma->init(priv->ioaddr, fixed_burst, burst_map); +} + +/** + * sxgbe_init_mtl_engine: MTL init. + * @priv: driver private structure + * Description: + * It inits the MTL invoking the specific SXGBE callback. + */ +static void sxgbe_init_mtl_engine(struct sxgbe_priv_data *priv) +{ + int queue_num; + + SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) { + priv->hw->mtl->mtl_set_txfifosize(priv->ioaddr, queue_num, + priv->hw_cap.tx_mtl_qsize); + priv->hw->mtl->mtl_enable_txqueue(priv->ioaddr, queue_num); + } +} + +/** + * sxgbe_disable_mtl_engine: MTL disable. + * @priv: driver private structure + * Description: + * It disables the MTL queues by invoking the specific SXGBE callback. + */ +static void sxgbe_disable_mtl_engine(struct sxgbe_priv_data *priv) +{ + int queue_num; + + SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) + priv->hw->mtl->mtl_disable_txqueue(priv->ioaddr, queue_num); +} + + +/** + * sxgbe_tx_timer: mitigation sw timer for tx. + * @data: data pointer + * Description: + * This is the timer handler to directly invoke the sxgbe_tx_clean. + */ +static void sxgbe_tx_timer(unsigned long data) +{ + struct sxgbe_tx_queue *p = (struct sxgbe_tx_queue *)data; + sxgbe_tx_queue_clean(p); +} + +/** + * sxgbe_init_tx_coalesce: init tx mitigation options. + * @priv: driver private structure + * Description: + * This inits the transmit coalesce parameters: i.e. timer rate, + * timer handler and default threshold used for enabling the + * interrupt on completion bit. 
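+ * One timer is armed per TX queue; on expiry the handler directly
+ * invokes sxgbe_tx_queue_clean() for that queue.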
+ */ +static void sxgbe_tx_init_coalesce(struct sxgbe_priv_data *priv) +{ + u8 queue_num; + + SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) { + struct sxgbe_tx_queue *p = priv->txq[queue_num]; + p->tx_coal_frames = SXGBE_TX_FRAMES; + p->tx_coal_timer = SXGBE_COAL_TX_TIMER; + init_timer(&p->txtimer); + p->txtimer.expires = SXGBE_COAL_TIMER(p->tx_coal_timer); + p->txtimer.data = (unsigned long)&priv->txq[queue_num]; + p->txtimer.function = sxgbe_tx_timer; + add_timer(&p->txtimer); + } +} + +static void sxgbe_tx_del_timer(struct sxgbe_priv_data *priv) +{ + u8 queue_num; + + SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) { + struct sxgbe_tx_queue *p = priv->txq[queue_num]; + del_timer_sync(&p->txtimer); + } +} + +/** + * sxgbe_open - open entry point of the driver + * @dev : pointer to the device structure. + * Description: + * This function is the open entry point of the driver. + * Return value: + * 0 on success and an appropriate (-)ve integer as defined in errno.h + * file on failure. + */ +static int sxgbe_open(struct net_device *dev) +{ + struct sxgbe_priv_data *priv = netdev_priv(dev); + int ret, queue_num; + + clk_prepare_enable(priv->sxgbe_clk); + + sxgbe_check_ether_addr(priv); + + /* Init the phy */ + ret = sxgbe_init_phy(dev); + if (ret) { + netdev_err(dev, "%s: Cannot attach to PHY (error: %d)\n", + __func__, ret); + goto phy_error; + } + + /* Create and initialize the TX/RX descriptors chains. */ + priv->dma_tx_size = SXGBE_ALIGN(DMA_TX_SIZE); + priv->dma_rx_size = SXGBE_ALIGN(DMA_RX_SIZE); + priv->dma_buf_sz = SXGBE_ALIGN(DMA_BUFFER_SIZE); + priv->tx_tc = TC_DEFAULT; + priv->rx_tc = TC_DEFAULT; + init_dma_desc_rings(dev); + + /* DMA initialization and SW reset */ + ret = sxgbe_init_dma_engine(priv); + if (ret < 0) { + netdev_err(dev, "%s: DMA initialization failed\n", __func__); + goto init_error; + } + + /* MTL initialization */ + sxgbe_init_mtl_engine(priv); + + /* Copy the MAC addr into the HW */ + priv->hw->mac->set_umac_addr(priv->ioaddr, dev->dev_addr, 0); + + /* Initialize the MAC Core */ + priv->hw->mac->core_init(priv->ioaddr); + + /* Request the IRQ lines */ + ret = devm_request_irq(priv->device, priv->irq, sxgbe_common_interrupt, + IRQF_SHARED, dev->name, dev); + if (unlikely(ret < 0)) { + netdev_err(dev, "%s: ERROR: allocating the IRQ %d (error: %d)\n", + __func__, priv->irq, ret); + goto init_error; + } + + /* If the LPI irq is different from the mac irq + * register a dedicated handler + */ + if (priv->lpi_irq != dev->irq) { + ret = devm_request_irq(priv->device, priv->lpi_irq, + sxgbe_common_interrupt, + IRQF_SHARED, dev->name, dev); + if (unlikely(ret < 0)) { + netdev_err(dev, "%s: ERROR: allocating the LPI IRQ %d (%d)\n", + __func__, priv->lpi_irq, ret); + goto init_error; + } + } + + /* Request TX DMA irq lines */ + SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) { + ret = devm_request_irq(priv->device, + (priv->txq[queue_num])->irq_no, + sxgbe_tx_interrupt, 0, + dev->name, priv->txq[queue_num]); + if (unlikely(ret < 0)) { + netdev_err(dev, "%s: ERROR: allocating TX IRQ %d (error: %d)\n", + __func__, priv->irq, ret); + goto init_error; + } + } + + /* Request RX DMA irq lines */ + SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, queue_num) { + ret = devm_request_irq(priv->device, + (priv->rxq[queue_num])->irq_no, + sxgbe_rx_interrupt, 0, + dev->name, priv->rxq[queue_num]); + if (unlikely(ret < 0)) { + netdev_err(dev, "%s: ERROR: allocating TX IRQ %d (error: %d)\n", + __func__, priv->irq, ret); + goto init_error; + } + } + + /* Enable the MAC Rx/Tx */ + 
priv->hw->mac->enable_tx(priv->ioaddr, true); + priv->hw->mac->enable_rx(priv->ioaddr, true); + + /* Set the HW DMA mode and the COE */ + sxgbe_mtl_operation_mode(priv); + + /* Extra statistics */ + memset(&priv->xstats, 0, sizeof(struct sxgbe_extra_stats)); + + priv->xstats.tx_threshold = priv->tx_tc; + priv->xstats.rx_threshold = priv->rx_tc; + + /* Start the ball rolling... */ + netdev_dbg(dev, "DMA RX/TX processes started...\n"); + priv->hw->dma->start_tx(priv->ioaddr, SXGBE_TX_QUEUES); + priv->hw->dma->start_rx(priv->ioaddr, SXGBE_RX_QUEUES); + + if (priv->phydev) + phy_start(priv->phydev); + + /* initalise TX coalesce parameters */ + sxgbe_tx_init_coalesce(priv); + + if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) { + priv->rx_riwt = SXGBE_MAX_DMA_RIWT; + priv->hw->dma->rx_watchdog(priv->ioaddr, SXGBE_MAX_DMA_RIWT); + } + + priv->tx_lpi_timer = SXGBE_DEFAULT_LPI_TIMER; + priv->eee_enabled = sxgbe_eee_init(priv); + + napi_enable(&priv->napi); + netif_start_queue(dev); + + return 0; + +init_error: + free_dma_desc_resources(priv); + if (priv->phydev) + phy_disconnect(priv->phydev); +phy_error: + clk_disable_unprepare(priv->sxgbe_clk); + + return ret; +} + +/** + * sxgbe_release - close entry point of the driver + * @dev : device pointer. + * Description: + * This is the stop entry point of the driver. + */ +static int sxgbe_release(struct net_device *dev) +{ + struct sxgbe_priv_data *priv = netdev_priv(dev); + + if (priv->eee_enabled) + del_timer_sync(&priv->eee_ctrl_timer); + + /* Stop and disconnect the PHY */ + if (priv->phydev) { + phy_stop(priv->phydev); + phy_disconnect(priv->phydev); + priv->phydev = NULL; + } + + netif_tx_stop_all_queues(dev); + + napi_disable(&priv->napi); + + /* delete TX timers */ + sxgbe_tx_del_timer(priv); + + /* Stop TX/RX DMA and clear the descriptors */ + priv->hw->dma->stop_tx(priv->ioaddr, SXGBE_TX_QUEUES); + priv->hw->dma->stop_rx(priv->ioaddr, SXGBE_RX_QUEUES); + + /* disable MTL queue */ + sxgbe_disable_mtl_engine(priv); + + /* Release and free the Rx/Tx resources */ + free_dma_desc_resources(priv); + + /* Disable the MAC Rx/Tx */ + priv->hw->mac->enable_tx(priv->ioaddr, false); + priv->hw->mac->enable_rx(priv->ioaddr, false); + + clk_disable_unprepare(priv->sxgbe_clk); + + return 0; +} + +/* Prepare first Tx descriptor for doing TSO operation */ +void sxgbe_tso_prepare(struct sxgbe_priv_data *priv, + struct sxgbe_tx_norm_desc *first_desc, + struct sk_buff *skb) +{ + unsigned int total_hdr_len, tcp_hdr_len; + + /* Write first Tx descriptor with appropriate value */ + tcp_hdr_len = tcp_hdrlen(skb); + total_hdr_len = skb_transport_offset(skb) + tcp_hdr_len; + + first_desc->tdes01 = dma_map_single(priv->device, skb->data, + total_hdr_len, DMA_TO_DEVICE); + if (dma_mapping_error(priv->device, first_desc->tdes01)) + pr_err("%s: TX dma mapping failed!!\n", __func__); + + first_desc->tdes23.tx_rd_des23.first_desc = 1; + priv->hw->desc->tx_desc_enable_tse(first_desc, 1, total_hdr_len, + tcp_hdr_len, + skb->len - total_hdr_len); +} + +/** + * sxgbe_xmit: Tx entry point of the driver + * @skb : the socket buffer + * @dev : device pointer + * Description : this is the tx entry point of the driver. + * It programs the chain or the ring and supports oversized frames + * and SG feature. 
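+ * A context descriptor is requested ahead of the first data descriptor
+ * when TSO is in use and the MSS changed since the previous frame, or
+ * when VLAN tagging or hardware timestamping asks for one.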
+ */ +static netdev_tx_t sxgbe_xmit(struct sk_buff *skb, struct net_device *dev) +{ + unsigned int entry, frag_num; + int cksum_flag = 0; + struct netdev_queue *dev_txq; + unsigned txq_index = skb_get_queue_mapping(skb); + struct sxgbe_priv_data *priv = netdev_priv(dev); + unsigned int tx_rsize = priv->dma_tx_size; + struct sxgbe_tx_queue *tqueue = priv->txq[txq_index]; + struct sxgbe_tx_norm_desc *tx_desc, *first_desc; + struct sxgbe_tx_ctxt_desc *ctxt_desc = NULL; + int nr_frags = skb_shinfo(skb)->nr_frags; + int no_pagedlen = skb_headlen(skb); + int is_jumbo = 0; + u16 cur_mss = skb_shinfo(skb)->gso_size; + u32 ctxt_desc_req = 0; + + /* get the TX queue handle */ + dev_txq = netdev_get_tx_queue(dev, txq_index); + + if (unlikely(skb_is_gso(skb) && tqueue->prev_mss != cur_mss)) + ctxt_desc_req = 1; + + if (unlikely(vlan_tx_tag_present(skb) || + ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && + tqueue->hwts_tx_en))) + ctxt_desc_req = 1; + + /* get the spinlock */ + spin_lock(&tqueue->tx_lock); + + if (priv->tx_path_in_lpi_mode) + sxgbe_disable_eee_mode(priv); + + if (unlikely(sxgbe_tx_avail(tqueue, tx_rsize) < nr_frags + 1)) { + if (!netif_tx_queue_stopped(dev_txq)) { + netif_tx_stop_queue(dev_txq); + netdev_err(dev, "%s: Tx Ring is full when %d queue is awake\n", + __func__, txq_index); + } + /* release the spin lock in case of BUSY */ + spin_unlock(&tqueue->tx_lock); + return NETDEV_TX_BUSY; + } + + entry = tqueue->cur_tx % tx_rsize; + tx_desc = tqueue->dma_tx + entry; + + first_desc = tx_desc; + if (ctxt_desc_req) + ctxt_desc = (struct sxgbe_tx_ctxt_desc *)first_desc; + + /* save the skb address */ + tqueue->tx_skbuff[entry] = skb; + + if (!is_jumbo) { + if (likely(skb_is_gso(skb))) { + /* TSO support */ + if (unlikely(tqueue->prev_mss != cur_mss)) { + priv->hw->desc->tx_ctxt_desc_set_mss( + ctxt_desc, cur_mss); + priv->hw->desc->tx_ctxt_desc_set_tcmssv( + ctxt_desc); + priv->hw->desc->tx_ctxt_desc_reset_ostc( + ctxt_desc); + priv->hw->desc->tx_ctxt_desc_set_ctxt( + ctxt_desc); + priv->hw->desc->tx_ctxt_desc_set_owner( + ctxt_desc); + + entry = (++tqueue->cur_tx) % tx_rsize; + first_desc = tqueue->dma_tx + entry; + + tqueue->prev_mss = cur_mss; + } + sxgbe_tso_prepare(priv, first_desc, skb); + } else { + tx_desc->tdes01 = dma_map_single(priv->device, + skb->data, no_pagedlen, DMA_TO_DEVICE); + if (dma_mapping_error(priv->device, tx_desc->tdes01)) + netdev_err(dev, "%s: TX dma mapping failed!!\n", + __func__); + + priv->hw->desc->prepare_tx_desc(tx_desc, 1, no_pagedlen, + no_pagedlen, cksum_flag); + } + } + + for (frag_num = 0; frag_num < nr_frags; frag_num++) { + const skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_num]; + int len = skb_frag_size(frag); + + entry = (++tqueue->cur_tx) % tx_rsize; + tx_desc = tqueue->dma_tx + entry; + tx_desc->tdes01 = skb_frag_dma_map(priv->device, frag, 0, len, + DMA_TO_DEVICE); + + tqueue->tx_skbuff_dma[entry] = tx_desc->tdes01; + tqueue->tx_skbuff[entry] = NULL; + + /* prepare the descriptor */ + priv->hw->desc->prepare_tx_desc(tx_desc, 0, len, + len, cksum_flag); + /* memory barrier to flush descriptor */ + wmb(); + + /* set the owner */ + priv->hw->desc->set_tx_owner(tx_desc); + } + + /* close the descriptors */ + priv->hw->desc->close_tx_desc(tx_desc); + + /* memory barrier to flush descriptor */ + wmb(); + + tqueue->tx_count_frames += nr_frags + 1; + if (tqueue->tx_count_frames > tqueue->tx_coal_frames) { + priv->hw->desc->clear_tx_ic(tx_desc); + priv->xstats.tx_reset_ic_bit++; + mod_timer(&tqueue->txtimer, + 
SXGBE_COAL_TIMER(tqueue->tx_coal_timer)); + } else { + tqueue->tx_count_frames = 0; + } + + /* set owner for first desc */ + priv->hw->desc->set_tx_owner(first_desc); + + /* memory barrier to flush descriptor */ + wmb(); + + tqueue->cur_tx++; + + /* display current ring */ + netif_dbg(priv, pktdata, dev, "%s: curr %d dirty=%d entry=%d, first=%p, nfrags=%d\n", + __func__, tqueue->cur_tx % tx_rsize, + tqueue->dirty_tx % tx_rsize, entry, + first_desc, nr_frags); + + if (unlikely(sxgbe_tx_avail(tqueue, tx_rsize) <= (MAX_SKB_FRAGS + 1))) { + netif_dbg(priv, hw, dev, "%s: stop transmitted packets\n", + __func__); + netif_tx_stop_queue(dev_txq); + } + + dev->stats.tx_bytes += skb->len; + + if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && + tqueue->hwts_tx_en)) { + /* declare that device is doing timestamping */ + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + priv->hw->desc->tx_enable_tstamp(first_desc); + } + + if (!tqueue->hwts_tx_en) + skb_tx_timestamp(skb); + + priv->hw->dma->enable_dma_transmission(priv->ioaddr, txq_index); + + spin_unlock(&tqueue->tx_lock); + + return NETDEV_TX_OK; +} + +/** + * sxgbe_rx_refill: refill used skb preallocated buffers + * @priv: driver private structure + * Description : this is to reallocate the skb for the reception process + * that is based on zero-copy. + */ +static void sxgbe_rx_refill(struct sxgbe_priv_data *priv) +{ + unsigned int rxsize = priv->dma_rx_size; + int bfsize = priv->dma_buf_sz; + u8 qnum = priv->cur_rx_qnum; + + for (; priv->rxq[qnum]->cur_rx - priv->rxq[qnum]->dirty_rx > 0; + priv->rxq[qnum]->dirty_rx++) { + unsigned int entry = priv->rxq[qnum]->dirty_rx % rxsize; + struct sxgbe_rx_norm_desc *p; + + p = priv->rxq[qnum]->dma_rx + entry; + + if (likely(priv->rxq[qnum]->rx_skbuff[entry] == NULL)) { + struct sk_buff *skb; + + skb = netdev_alloc_skb_ip_align(priv->dev, bfsize); + + if (unlikely(skb == NULL)) + break; + + priv->rxq[qnum]->rx_skbuff[entry] = skb; + priv->rxq[qnum]->rx_skbuff_dma[entry] = + dma_map_single(priv->device, skb->data, bfsize, + DMA_FROM_DEVICE); + + p->rdes23.rx_rd_des23.buf2_addr = + priv->rxq[qnum]->rx_skbuff_dma[entry]; + } + + /* Added memory barrier for RX descriptor modification */ + wmb(); + priv->hw->desc->set_rx_owner(p); + /* Added memory barrier for RX descriptor modification */ + wmb(); + } +} + +/** + * sxgbe_rx: receive the frames from the remote host + * @priv: driver private structure + * @limit: napi bugget. + * Description : this the function called by the napi poll method. + * It gets all the frames inside the ring. + */ +static int sxgbe_rx(struct sxgbe_priv_data *priv, int limit) +{ + u8 qnum = priv->cur_rx_qnum; + unsigned int rxsize = priv->dma_rx_size; + unsigned int entry = priv->rxq[qnum]->cur_rx; + unsigned int next_entry = 0; + unsigned int count = 0; + int checksum; + int status; + + while (count < limit) { + struct sxgbe_rx_norm_desc *p; + struct sk_buff *skb; + int frame_len; + + p = priv->rxq[qnum]->dma_rx + entry; + + if (priv->hw->desc->get_rx_owner(p)) + break; + + count++; + + next_entry = (++priv->rxq[qnum]->cur_rx) % rxsize; + prefetch(priv->rxq[qnum]->dma_rx + next_entry); + + /* Read the status of the incoming frame and also get checksum + * value based on whether it is enabled in SXGBE hardware or + * not. 
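+		 * A negative write-back status marks an errored
+		 * descriptor; the frame is skipped while the ring
+		 * index still advances.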
+ */ + status = priv->hw->desc->rx_wbstatus(p, &priv->xstats, + &checksum); + if (unlikely(status < 0)) { + entry = next_entry; + continue; + } + if (unlikely(!priv->rxcsum_insertion)) + checksum = CHECKSUM_NONE; + + skb = priv->rxq[qnum]->rx_skbuff[entry]; + + if (unlikely(!skb)) + netdev_err(priv->dev, "rx descriptor is not consistent\n"); + + prefetch(skb->data - NET_IP_ALIGN); + priv->rxq[qnum]->rx_skbuff[entry] = NULL; + + frame_len = priv->hw->desc->get_rx_frame_len(p); + + skb_put(skb, frame_len); + + skb->ip_summed = checksum; + if (checksum == CHECKSUM_NONE) + netif_receive_skb(skb); + else + napi_gro_receive(&priv->napi, skb); + + entry = next_entry; + } + + sxgbe_rx_refill(priv); + + return count; +} + +/** + * sxgbe_poll - sxgbe poll method (NAPI) + * @napi : pointer to the napi structure. + * @budget : maximum number of packets that the current CPU can receive from + * all interfaces. + * Description : + * To look at the incoming frames and clear the tx resources. + */ +static int sxgbe_poll(struct napi_struct *napi, int budget) +{ + struct sxgbe_priv_data *priv = container_of(napi, + struct sxgbe_priv_data, napi); + int work_done = 0; + u8 qnum = priv->cur_rx_qnum; + + priv->xstats.napi_poll++; + /* first, clean the tx queues */ + sxgbe_tx_all_clean(priv); + + work_done = sxgbe_rx(priv, budget); + if (work_done < budget) { + napi_complete(napi); + priv->hw->dma->enable_dma_irq(priv->ioaddr, qnum); + } + + return work_done; +} + +/** + * sxgbe_tx_timeout + * @dev : Pointer to net device structure + * Description: this function is called when a packet transmission fails to + * complete within a reasonable time. The driver will mark the error in the + * netdev structure and arrange for the device to be reset to a sane state + * in order to transmit a new packet. + */ +static void sxgbe_tx_timeout(struct net_device *dev) +{ + struct sxgbe_priv_data *priv = netdev_priv(dev); + + sxgbe_reset_all_tx_queues(priv); +} + +/** + * sxgbe_common_interrupt - main ISR + * @irq: interrupt number. + * @dev_id: to pass the net device pointer. + * Description: this is the main driver interrupt service routine. + * It calls the DMA ISR and also the core ISR to manage PMT, MMC, LPI + * interrupts. + */ +static irqreturn_t sxgbe_common_interrupt(int irq, void *dev_id) +{ + struct net_device *netdev = (struct net_device *)dev_id; + struct sxgbe_priv_data *priv = netdev_priv(netdev); + int status; + + status = priv->hw->mac->host_irq_status(priv->ioaddr, &priv->xstats); + /* For LPI we need to save the tx status */ + if (status & TX_ENTRY_LPI_MODE) { + priv->xstats.tx_lpi_entry_n++; + priv->tx_path_in_lpi_mode = true; + } + if (status & TX_EXIT_LPI_MODE) { + priv->xstats.tx_lpi_exit_n++; + priv->tx_path_in_lpi_mode = false; + } + if (status & RX_ENTRY_LPI_MODE) + priv->xstats.rx_lpi_entry_n++; + if (status & RX_EXIT_LPI_MODE) + priv->xstats.rx_lpi_exit_n++; + + return IRQ_HANDLED; +} + +/** + * sxgbe_tx_interrupt - TX DMA ISR + * @irq: interrupt number. + * @dev_id: to pass the net device pointer. + * Description: this is the tx dma interrupt service routine. 
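+ * Besides scheduling NAPI on normal completion, it restarts the queue
+ * on a hard error and bumps the TX threshold (steps of 32 up to 128,
+ * then 64, capped at 512) when the hardware signals tx_bump_tc.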
+ */
+static irqreturn_t sxgbe_tx_interrupt(int irq, void *dev_id)
+{
+	int status;
+	struct sxgbe_tx_queue *txq = (struct sxgbe_tx_queue *)dev_id;
+	struct sxgbe_priv_data *priv = txq->priv_ptr;
+
+	/* get the channel status */
+	status = priv->hw->dma->tx_dma_int_status(priv->ioaddr, txq->queue_no,
+						  &priv->xstats);
+	/* check for normal path */
+	if (likely((status & handle_tx)))
+		napi_schedule(&priv->napi);
+
+	/* check for unrecoverable error */
+	if (unlikely((status & tx_hard_error)))
+		sxgbe_restart_tx_queue(priv, txq->queue_no);
+
+	/* check for TC configuration change */
+	if (unlikely((status & tx_bump_tc) &&
+		     (priv->tx_tc != SXGBE_MTL_SFMODE) &&
+		     (priv->tx_tc < 512))) {
+		/* step of TX TC is 32 up to 128, otherwise 64 */
+		priv->tx_tc += (priv->tx_tc < 128) ? 32 : 64;
+		priv->hw->mtl->set_tx_mtl_mode(priv->ioaddr,
+					       txq->queue_no, priv->tx_tc);
+		priv->xstats.tx_threshold = priv->tx_tc;
+	}
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * sxgbe_rx_interrupt - RX DMA ISR
+ * @irq: interrupt number.
+ * @dev_id: to pass the net device pointer.
+ * Description: this is the rx dma interrupt service routine.
+ */
+static irqreturn_t sxgbe_rx_interrupt(int irq, void *dev_id)
+{
+	int status;
+	struct sxgbe_rx_queue *rxq = (struct sxgbe_rx_queue *)dev_id;
+	struct sxgbe_priv_data *priv = rxq->priv_ptr;
+
+	/* get the channel status */
+	status = priv->hw->dma->rx_dma_int_status(priv->ioaddr, rxq->queue_no,
+						  &priv->xstats);
+
+	if (likely((status & handle_rx) && (napi_schedule_prep(&priv->napi)))) {
+		priv->hw->dma->disable_dma_irq(priv->ioaddr, rxq->queue_no);
+		__napi_schedule(&priv->napi);
+	}
+
+	/* check for TC configuration change */
+	if (unlikely((status & rx_bump_tc) &&
+		     (priv->rx_tc != SXGBE_MTL_SFMODE) &&
+		     (priv->rx_tc < 128))) {
+		/* step of RX TC is 32 */
+		priv->rx_tc += 32;
+		priv->hw->mtl->set_rx_mtl_mode(priv->ioaddr,
+					       rxq->queue_no, priv->rx_tc);
+		priv->xstats.rx_threshold = priv->rx_tc;
+	}
+
+	return IRQ_HANDLED;
+}
+
+static inline u64 sxgbe_get_stat64(void __iomem *ioaddr, int reg_lo, int reg_hi)
+{
+	u64 val = readl(ioaddr + reg_lo);
+
+	val |= ((u64)readl(ioaddr + reg_hi)) << 32;
+
+	return val;
+}
+
+/* sxgbe_get_stats64 - entry point to see the statistical information of the device
+ * @dev : device pointer.
+ * @stats : pointer to hold all the statistical information of the device.
+ * Description:
+ * This function is a driver entry point, called whenever the ifconfig
+ * command gets executed to see device statistics. Statistics are the
+ * number of bytes sent or received, errors occurred, etc.
+ * Return value:
+ * This function returns various statistical information of the device.
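+ *
+ * Each MMC counter is kept by the hardware as a LO/HI register pair;
+ * sxgbe_get_stat64() above recombines the two halves, e.g. (purely
+ * illustrative values):
+ *
+ *	lo = 0xdeadbeef, hi = 0x1
+ *	val = ((u64)hi << 32) | lo = 0x1deadbeef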
+ */
+static struct rtnl_link_stats64 *sxgbe_get_stats64(struct net_device *dev,
+						   struct rtnl_link_stats64 *stats)
+{
+	struct sxgbe_priv_data *priv = netdev_priv(dev);
+	void __iomem *ioaddr = priv->ioaddr;
+	u64 count;
+
+	spin_lock(&priv->stats_lock);
+	/* Freeze the counter registers before reading their values, otherwise
+	 * they may be updated by hardware while we are reading them
+	 */
+	writel(SXGBE_MMC_CTRL_CNT_FRZ, ioaddr + SXGBE_MMC_CTL_REG);
+
+	stats->rx_bytes = sxgbe_get_stat64(ioaddr,
+					   SXGBE_MMC_RXOCTETLO_GCNT_REG,
+					   SXGBE_MMC_RXOCTETHI_GCNT_REG);
+
+	stats->rx_packets = sxgbe_get_stat64(ioaddr,
+					     SXGBE_MMC_RXFRAMELO_GBCNT_REG,
+					     SXGBE_MMC_RXFRAMEHI_GBCNT_REG);
+
+	stats->multicast = sxgbe_get_stat64(ioaddr,
+					    SXGBE_MMC_RXMULTILO_GCNT_REG,
+					    SXGBE_MMC_RXMULTIHI_GCNT_REG);
+
+	stats->rx_crc_errors = sxgbe_get_stat64(ioaddr,
+						SXGBE_MMC_RXCRCERRLO_REG,
+						SXGBE_MMC_RXCRCERRHI_REG);
+
+	stats->rx_length_errors = sxgbe_get_stat64(ioaddr,
+						   SXGBE_MMC_RXLENERRLO_REG,
+						   SXGBE_MMC_RXLENERRHI_REG);
+
+	stats->rx_missed_errors = sxgbe_get_stat64(ioaddr,
+						   SXGBE_MMC_RXFIFOOVERFLOWLO_GBCNT_REG,
+						   SXGBE_MMC_RXFIFOOVERFLOWHI_GBCNT_REG);
+
+	stats->tx_bytes = sxgbe_get_stat64(ioaddr,
+					   SXGBE_MMC_TXOCTETLO_GCNT_REG,
+					   SXGBE_MMC_TXOCTETHI_GCNT_REG);
+
+	count = sxgbe_get_stat64(ioaddr, SXGBE_MMC_TXFRAMELO_GBCNT_REG,
+				 SXGBE_MMC_TXFRAMEHI_GBCNT_REG);
+
+	stats->tx_errors = sxgbe_get_stat64(ioaddr, SXGBE_MMC_TXFRAMELO_GCNT_REG,
+					    SXGBE_MMC_TXFRAMEHI_GCNT_REG);
+	stats->tx_errors = count - stats->tx_errors;
+	stats->tx_packets = count;
+	stats->tx_fifo_errors = sxgbe_get_stat64(ioaddr, SXGBE_MMC_TXUFLWLO_GBCNT_REG,
+						 SXGBE_MMC_TXUFLWHI_GBCNT_REG);
+	writel(0, ioaddr + SXGBE_MMC_CTL_REG);
+	spin_unlock(&priv->stats_lock);
+
+	return stats;
+}
+
+/* sxgbe_set_features - entry point to set offload features of the device.
+ * @dev : device pointer.
+ * @features : features which are required to be set.
+ * Description:
+ * This function is a driver entry point and is called by the Linux kernel
+ * whenever any device features are set or reset by the user.
+ * Return value:
+ * This function returns 0 after setting or resetting device features.
+ */
+static int sxgbe_set_features(struct net_device *dev,
+			      netdev_features_t features)
+{
+	struct sxgbe_priv_data *priv = netdev_priv(dev);
+	netdev_features_t changed = dev->features ^ features;
+
+	if (changed & NETIF_F_RXCSUM) {
+		if (features & NETIF_F_RXCSUM) {
+			priv->hw->mac->enable_rx_csum(priv->ioaddr);
+			priv->rxcsum_insertion = true;
+		} else {
+			priv->hw->mac->disable_rx_csum(priv->ioaddr);
+			priv->rxcsum_insertion = false;
+		}
+	}
+
+	return 0;
+}
+
+/* sxgbe_change_mtu - entry point to change MTU size for the device.
+ * @dev : device pointer.
+ * @new_mtu : the new MTU size for the device.
+ * Description: the Maximum Transfer Unit (MTU) is used by the network layer
+ * to drive packet transmission. Ethernet has an MTU of 1500 octets
+ * (ETH_DATA_LEN). This value can be changed with ifconfig.
+ * Return value:
+ * 0 on success and an appropriate negative integer, as defined in errno.h,
+ * on failure.
+ */
+static int sxgbe_change_mtu(struct net_device *dev, int new_mtu)
+{
+	/* RFC 791, page 25, "Every internet module must be able to forward
+	 * a datagram of 68 octets without further fragmentation."
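+	 * MIN_MTU/MAX_MTU bound the range accepted below; anything outside
+	 * it is rejected with -EINVAL.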
+	 */
+	if (new_mtu < MIN_MTU || (new_mtu > MAX_MTU)) {
+		netdev_err(dev, "invalid MTU, MTU should be between %d and %d\n",
+			   MIN_MTU, MAX_MTU);
+		return -EINVAL;
+	}
+
+	/* Return if the buffer sizes will not change */
+	if (dev->mtu == new_mtu)
+		return 0;
+
+	dev->mtu = new_mtu;
+
+	if (!netif_running(dev))
+		return 0;
+
+	/* The receive ring buffer size needs to be set based on the MTU. If
+	 * the MTU is changed, the receive ring buffers have to be
+	 * reinitialised, so bring the interface down and back up.
+	 */
+	sxgbe_release(dev);
+	return sxgbe_open(dev);
+}
+
+static void sxgbe_set_umac_addr(void __iomem *ioaddr, unsigned char *addr,
+				unsigned int reg_n)
+{
+	unsigned long data;
+
+	data = (addr[5] << 8) | addr[4];
+	/* For the MAC Addr registers we have to set the Address Enable (AE)
+	 * bit, which has no effect on the High Reg 0 where the bit 31 (MO)
+	 * is RO.
+	 */
+	writel(data | SXGBE_HI_REG_AE, ioaddr + SXGBE_ADDR_HIGH(reg_n));
+	data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
+	writel(data, ioaddr + SXGBE_ADDR_LOW(reg_n));
+}
+
+/**
+ * sxgbe_set_rx_mode - entry point for setting the different receive modes of
+ * a device (unicast, multicast, promiscuous addressing)
+ * @dev : pointer to the device structure
+ * Description:
+ * This function is a driver entry point which gets called by the kernel
+ * whenever a receive mode like unicast, multicast or promiscuous
+ * must be enabled/disabled.
+ * Return value:
+ * void.
+ */
+static void sxgbe_set_rx_mode(struct net_device *dev)
+{
+	struct sxgbe_priv_data *priv = netdev_priv(dev);
+	void __iomem *ioaddr = (void __iomem *)priv->ioaddr;
+	unsigned int value = 0;
+	u32 mc_filter[2];
+	struct netdev_hw_addr *ha;
+	int reg = 1;
+
+	netdev_dbg(dev, "%s: # mcasts %d, # unicast %d\n",
+		   __func__, netdev_mc_count(dev), netdev_uc_count(dev));
+
+	if (dev->flags & IFF_PROMISC) {
+		value = SXGBE_FRAME_FILTER_PR;
+
+	} else if ((netdev_mc_count(dev) > SXGBE_HASH_TABLE_SIZE) ||
+		   (dev->flags & IFF_ALLMULTI)) {
+		value = SXGBE_FRAME_FILTER_PM;	/* pass all multi */
+		writel(0xffffffff, ioaddr + SXGBE_HASH_HIGH);
+		writel(0xffffffff, ioaddr + SXGBE_HASH_LOW);
+
+	} else if (!netdev_mc_empty(dev)) {
+		/* Hash filter for multicast */
+		value = SXGBE_FRAME_FILTER_HMC;
+
+		memset(mc_filter, 0, sizeof(mc_filter));
+		netdev_for_each_mc_addr(ha, dev) {
+			/* The upper 6 bits of the calculated CRC are used to
+			 * index the contents of the hash table
+			 */
+			int bit_nr = bitrev32(~crc32_le(~0, ha->addr, 6)) >> 26;
+
+			/* The most significant bit determines the register to
+			 * use (H/L) while the other 5 bits determine the bit
+			 * within the register.
+			 */
+			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
+		}
+		writel(mc_filter[0], ioaddr + SXGBE_HASH_LOW);
+		writel(mc_filter[1], ioaddr + SXGBE_HASH_HIGH);
+	}
+
+	/* Handle multiple unicast addresses (perfect filtering) */
+	if (netdev_uc_count(dev) > SXGBE_MAX_PERFECT_ADDRESSES)
+		/* Switch to promiscuous mode if more than 16 addrs
+		 * are required
+		 */
+		value |= SXGBE_FRAME_FILTER_PR;
+	else {
+		netdev_for_each_uc_addr(ha, dev) {
+			sxgbe_set_umac_addr(ioaddr, ha->addr, reg);
+			reg++;
+		}
+	}
+#ifdef FRAME_FILTER_DEBUG
+	/* Enable Receive all mode (to debug filtering_fail errors) */
+	value |= SXGBE_FRAME_FILTER_RA;
+#endif
+	writel(value, ioaddr + SXGBE_FRAME_FILTER);
+
+	netdev_dbg(dev, "Filter: 0x%08x\n\tHash: HI 0x%08x, LO 0x%08x\n",
+		   readl(ioaddr + SXGBE_FRAME_FILTER),
+		   readl(ioaddr + SXGBE_HASH_HIGH),
+		   readl(ioaddr + SXGBE_HASH_LOW));
+}
+
+/**
+ * sxgbe_config - entry point for changing the configuration mode passed in by
+ * ifconfig
+ * @dev : pointer to the device structure
+ * @map : pointer to the device mapping structure
+ * Description:
+ * This function is a driver entry point which gets called by the kernel
+ * whenever some device configuration is changed.
+ * Return value:
+ * This function returns 0 on success and an appropriate error otherwise.
+ */
+static int sxgbe_config(struct net_device *dev, struct ifmap *map)
+{
+	struct sxgbe_priv_data *priv = netdev_priv(dev);
+
+	/* Can't act on a running interface */
+	if (dev->flags & IFF_UP)
+		return -EBUSY;
+
+	/* Don't allow changing the I/O address */
+	if (map->base_addr != (unsigned long)priv->ioaddr) {
+		netdev_warn(dev, "can't change I/O address\n");
+		return -EOPNOTSUPP;
+	}
+
+	/* Don't allow changing the IRQ */
+	if (map->irq != priv->irq) {
+		netdev_warn(dev, "can't change IRQ number %d\n", priv->irq);
+		return -EOPNOTSUPP;
+	}
+
+	return 0;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/**
+ * sxgbe_poll_controller - entry point for polling the device's receive path
+ * @dev : pointer to the device structure
+ * Description:
+ * This function is used by NETCONSOLE and other diagnostic tools
+ * to allow network I/O with interrupts disabled.
+ * Return value:
+ * Void.
+ */
+static void sxgbe_poll_controller(struct net_device *dev)
+{
+	struct sxgbe_priv_data *priv = netdev_priv(dev);
+
+	disable_irq(priv->irq);
+	sxgbe_rx_interrupt(priv->irq, dev);
+	enable_irq(priv->irq);
+}
+#endif
+
+/* sxgbe_ioctl - Entry point for the ioctl
+ * @dev: Device pointer.
+ * @rq: An IOCTL specific structure, that can contain a pointer to
+ * a proprietary structure used to pass information to the driver.
+ * @cmd: IOCTL command
+ * Description:
+ * Currently it supports the phy_mii_ioctl(...) and HW time stamping.
+ */
+static int sxgbe_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+	struct sxgbe_priv_data *priv = netdev_priv(dev);
+	int ret = -EOPNOTSUPP;
+
+	if (!netif_running(dev))
+		return -EINVAL;
+
+	switch (cmd) {
+	case SIOCGMIIPHY:
+	case SIOCGMIIREG:
+	case SIOCSMIIREG:
+		if (!priv->phydev)
+			return -EINVAL;
+		ret = phy_mii_ioctl(priv->phydev, rq, cmd);
+		break;
+	default:
+		break;
+	}
+
+	return ret;
+}
+
+static const struct net_device_ops sxgbe_netdev_ops = {
+	.ndo_open = sxgbe_open,
+	.ndo_start_xmit = sxgbe_xmit,
+	.ndo_stop = sxgbe_release,
+	.ndo_get_stats64 = sxgbe_get_stats64,
+	.ndo_change_mtu = sxgbe_change_mtu,
+	.ndo_set_features = sxgbe_set_features,
+	.ndo_set_rx_mode = sxgbe_set_rx_mode,
+	.ndo_tx_timeout = sxgbe_tx_timeout,
+	.ndo_do_ioctl = sxgbe_ioctl,
+	.ndo_set_config = sxgbe_config,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller = sxgbe_poll_controller,
+#endif
+	.ndo_set_mac_address = eth_mac_addr,
+};
+
+/* Get the hardware ops */
+static void sxgbe_get_ops(struct sxgbe_ops * const ops_ptr)
+{
+	ops_ptr->mac = sxgbe_get_core_ops();
+	ops_ptr->desc = sxgbe_get_desc_ops();
+	ops_ptr->dma = sxgbe_get_dma_ops();
+	ops_ptr->mtl = sxgbe_get_mtl_ops();
+
+	/* set the MDIO communication Address/Data registers */
+	ops_ptr->mii.addr = SXGBE_MDIO_SCMD_ADD_REG;
+	ops_ptr->mii.data = SXGBE_MDIO_SCMD_DATA_REG;
+
+	/* Assign the default link settings; SXGBE defines no default
+	 * values to be set in the registers, so port and duplex are
+	 * set to 0.
+	 */
+	ops_ptr->link.port = 0;
+	ops_ptr->link.duplex = 0;
+	ops_ptr->link.speed = SXGBE_SPEED_10G;
+}
+
+/**
+ * sxgbe_hw_init - Init the GMAC device
+ * @priv: driver private structure
+ * Description: this function checks the HW capability
+ * (if supported) and sets the driver's features.
+ */
+static int sxgbe_hw_init(struct sxgbe_priv_data * const priv)
+{
+	u32 ctrl_ids;
+
+	priv->hw = kmalloc(sizeof(*priv->hw), GFP_KERNEL);
+	if (!priv->hw)
+		return -ENOMEM;
+
+	/* get the hardware ops */
+	sxgbe_get_ops(priv->hw);
+
+	/* get the controller id */
+	ctrl_ids = priv->hw->mac->get_controller_version(priv->ioaddr);
+	priv->hw->ctrl_uid = (ctrl_ids & 0x00ff0000) >> 16;
+	priv->hw->ctrl_id = (ctrl_ids & 0x000000ff);
+	pr_info("user ID: 0x%x, Controller ID: 0x%x\n",
+		priv->hw->ctrl_uid, priv->hw->ctrl_id);
+
+	/* get the H/W features */
+	if (!sxgbe_get_hw_features(priv))
+		pr_info("Hardware features not found\n");
+
+	if (priv->hw_cap.tx_csum_offload)
+		pr_info("TX Checksum offload supported\n");
+
+	if (priv->hw_cap.rx_csum_offload)
+		pr_info("RX Checksum offload supported\n");
+
+	return 0;
+}
+
+/**
+ * sxgbe_drv_probe
+ * @device: device pointer
+ * @plat_dat: platform data pointer
+ * @addr: iobase memory address
+ * Description: this is the main probe function, used to call
+ * alloc_etherdev and allocate the priv structure.
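+ * The probe sequence below is: HW init and capability discovery,
+ * TX/RX descriptor ring allocation, netdev feature setup, CSR clock
+ * lookup, MDIO bus registration and finally register_netdev().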
+ */
+struct sxgbe_priv_data *sxgbe_drv_probe(struct device *device,
+					struct sxgbe_plat_data *plat_dat,
+					void __iomem *addr)
+{
+	struct sxgbe_priv_data *priv;
+	struct net_device *ndev;
+	int ret;
+	u8 queue_num;
+
+	ndev = alloc_etherdev_mqs(sizeof(struct sxgbe_priv_data),
+				  SXGBE_TX_QUEUES, SXGBE_RX_QUEUES);
+	if (!ndev)
+		return NULL;
+
+	SET_NETDEV_DEV(ndev, device);
+
+	priv = netdev_priv(ndev);
+	priv->device = device;
+	priv->dev = ndev;
+
+	sxgbe_set_ethtool_ops(ndev);
+	priv->plat = plat_dat;
+	priv->ioaddr = addr;
+
+	/* Verify driver arguments */
+	sxgbe_verify_args();
+
+	/* Init MAC and get the capabilities */
+	ret = sxgbe_hw_init(priv);
+	if (ret)
+		goto error_free_netdev;
+
+	/* allocate memory resources for Descriptor rings */
+	ret = txring_mem_alloc(priv);
+	if (ret)
+		goto error_free_netdev;
+
+	ret = rxring_mem_alloc(priv);
+	if (ret)
+		goto error_free_netdev;
+
+	ndev->netdev_ops = &sxgbe_netdev_ops;
+
+	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+		NETIF_F_RXCSUM | NETIF_F_TSO | NETIF_F_TSO6 |
+		NETIF_F_GRO;
+	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
+	ndev->watchdog_timeo = msecs_to_jiffies(TX_TIMEO);
+
+	/* assign filtering support */
+	ndev->priv_flags |= IFF_UNICAST_FLT;
+
+	priv->msg_enable = netif_msg_init(debug, default_msg_level);
+
+	/* Enable TCP segmentation offload for all DMA channels */
+	if (priv->hw_cap.tcpseg_offload) {
+		SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
+			priv->hw->dma->enable_tso(priv->ioaddr, queue_num);
+		}
+	}
+
+	/* Enable Rx checksum offload */
+	if (priv->hw_cap.rx_csum_offload) {
+		priv->hw->mac->enable_rx_csum(priv->ioaddr);
+		priv->rxcsum_insertion = true;
+	}
+
+	/* Initialise pause frame settings */
+	priv->rx_pause = 1;
+	priv->tx_pause = 1;
+
+	/* Rx Watchdog is available; enable it depending on the platform data */
+	if (!priv->plat->riwt_off) {
+		priv->use_riwt = 1;
+		pr_info("Enable RX Mitigation via HW Watchdog Timer\n");
+	}
+
+	netif_napi_add(ndev, &priv->napi, sxgbe_poll, 64);
+
+	spin_lock_init(&priv->stats_lock);
+
+	priv->sxgbe_clk = clk_get(priv->device, SXGBE_RESOURCE_NAME);
+	if (IS_ERR(priv->sxgbe_clk)) {
+		netdev_warn(ndev, "%s: warning: cannot get CSR clock\n",
+			    __func__);
+		goto error_clk_get;
+	}
+
+	/* If a specific clk_csr value is passed from the platform, this means
+	 * that the CSR Clock Range selection cannot be changed at run-time
+	 * and is fixed. Otherwise the driver will try to set the MDC clock
+	 * dynamically, according to the actual CSR clock input.
+	 */
+	if (!priv->plat->clk_csr)
+		sxgbe_clk_csr_set(priv);
+	else
+		priv->clk_csr = priv->plat->clk_csr;
+
+	/* MDIO bus Registration */
+	ret = sxgbe_mdio_register(ndev);
+	if (ret < 0) {
+		netdev_dbg(ndev, "%s: MDIO bus (id: %d) registration failed\n",
+			   __func__, priv->plat->bus_id);
+		goto error_mdio_register;
+	}
+
+	ret = register_netdev(ndev);
+	if (ret) {
+		pr_err("%s: ERROR %i registering the device\n", __func__, ret);
+		goto error_netdev_register;
+	}
+
+	sxgbe_check_ether_addr(priv);
+
+	return priv;
+
+error_netdev_register:
+	sxgbe_mdio_unregister(ndev);
+error_mdio_register:
+	clk_put(priv->sxgbe_clk);
+error_clk_get:
+	netif_napi_del(&priv->napi);
+error_free_netdev:
+	free_netdev(ndev);
+
+	return NULL;
+}
+
+/**
+ * sxgbe_drv_remove
+ * @ndev: net device pointer
+ * Description: this function resets the TX/RX processes, disables the MAC
+ * RX/TX, changes the link status and releases the DMA descriptor rings.
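+ * Teardown runs in reverse order of the probe: stop the RX/TX DMA
+ * queues, disable the MAC receiver and transmitter, delete the NAPI
+ * context, unregister the MDIO bus and the netdev, then free it.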
+ */
+int sxgbe_drv_remove(struct net_device *ndev)
+{
+	struct sxgbe_priv_data *priv = netdev_priv(ndev);
+
+	netdev_info(ndev, "%s: removing driver\n", __func__);
+
+	priv->hw->dma->stop_rx(priv->ioaddr, SXGBE_RX_QUEUES);
+	priv->hw->dma->stop_tx(priv->ioaddr, SXGBE_TX_QUEUES);
+
+	priv->hw->mac->enable_tx(priv->ioaddr, false);
+	priv->hw->mac->enable_rx(priv->ioaddr, false);
+
+	netif_napi_del(&priv->napi);
+
+	sxgbe_mdio_unregister(ndev);
+
+	unregister_netdev(ndev);
+
+	free_netdev(ndev);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+int sxgbe_suspend(struct net_device *ndev)
+{
+	return 0;
+}
+
+int sxgbe_resume(struct net_device *ndev)
+{
+	return 0;
+}
+
+int sxgbe_freeze(struct net_device *ndev)
+{
+	return -ENOSYS;
+}
+
+int sxgbe_restore(struct net_device *ndev)
+{
+	return -ENOSYS;
+}
+#endif /* CONFIG_PM */
+
+/* Driver is configured as a platform driver */
+static int __init sxgbe_init(void)
+{
+	int ret;
+
+	ret = sxgbe_register_platform();
+	if (ret)
+		goto err;
+	return 0;
+err:
+	pr_err("driver registration failed\n");
+	return ret;
+}
+
+static void __exit sxgbe_exit(void)
+{
+	sxgbe_unregister_platform();
+}
+
+module_init(sxgbe_init);
+module_exit(sxgbe_exit);
+
+#ifndef MODULE
+static int __init sxgbe_cmdline_opt(char *str)
+{
+	char *opt;
+
+	if (!str || !*str)
+		return -EINVAL;
+	while ((opt = strsep(&str, ",")) != NULL) {
+		if (!strncmp(opt, "eee_timer:", 10)) {
+			if (kstrtoint(opt + 10, 0, &eee_timer))
+				goto err;
+		}
+	}
+	return 0;
+
+err:
+	pr_err("%s: ERROR broken module parameter conversion\n", __func__);
+	return -EINVAL;
+}
+
+__setup("sxgbeeth=", sxgbe_cmdline_opt);
+#endif /* MODULE */
+
+MODULE_DESCRIPTION("SAMSUNG 10G/2.5G/1G Ethernet PLATFORM driver");
+
+MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
+MODULE_PARM_DESC(eee_timer, "EEE-LPI Default LS timer value");
+
+MODULE_AUTHOR("Siva Reddy Kallam <siva.kallam@samsung.com>");
+MODULE_AUTHOR("ByungHo An <bh74.an@samsung.com>");
+MODULE_AUTHOR("Girish K S <ks.giri@samsung.com>");
+MODULE_AUTHOR("Vipul Pandya <vipul.pandya@samsung.com>");
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_mdio.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_mdio.c
new file mode 100644
index 000000000000..01af2cbb479d
--- /dev/null
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_mdio.c
@@ -0,0 +1,244 @@
+/* 10G controller driver for Samsung SoCs
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ *	http://www.samsung.com
+ *
+ * Author: Siva Reddy Kallam <siva.kallam@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/io.h>
+#include <linux/mii.h>
+#include <linux/netdevice.h>
+#include <linux/platform_device.h>
+#include <linux/phy.h>
+#include <linux/slab.h>
+#include <linux/sxgbe_platform.h>
+
+#include "sxgbe_common.h"
+#include "sxgbe_reg.h"
+
+#define SXGBE_SMA_WRITE_CMD	0x01 /* write command */
+#define SXGBE_SMA_PREAD_CMD	0x02 /* post-read increment address */
+#define SXGBE_SMA_READ_CMD	0x03 /* read command */
+#define SXGBE_SMA_SKIP_ADDRFRM	0x00040000 /* skip the address frame */
+#define SXGBE_MII_BUSY		0x00800000 /* mii busy */
+
+static int sxgbe_mdio_busy_wait(void __iomem *ioaddr, unsigned int mii_data)
+{
+	unsigned long fin_time = jiffies + 3 * HZ; /* 3 seconds */
+
+	while (!time_after(jiffies, fin_time)) {
+		if (!(readl(ioaddr + mii_data) & SXGBE_MII_BUSY))
+			return 0;
+		cpu_relax();
+	}
+
+	return -EBUSY;
+}
+
+static void sxgbe_mdio_ctrl_data(struct sxgbe_priv_data *sp, u32 cmd,
+				 u16 phydata)
+{
+	u32 reg = phydata;
+
+	reg |= (cmd << 16) | SXGBE_SMA_SKIP_ADDRFRM |
+	       ((sp->clk_csr & 0x7) << 19) | SXGBE_MII_BUSY;
+	writel(reg, sp->ioaddr + sp->hw->mii.data);
+}
+
+static void sxgbe_mdio_c45(struct sxgbe_priv_data *sp, u32 cmd, int phyaddr,
+			   int phyreg, u16 phydata)
+{
+	u32 reg;
+
+	/* set mdio address register */
+	reg = ((phyreg >> 16) & 0x1f) << 21;
+	reg |= (phyaddr << 16) | (phyreg & 0xffff);
+	writel(reg, sp->ioaddr + sp->hw->mii.addr);
+
+	sxgbe_mdio_ctrl_data(sp, cmd, phydata);
+}
+
+static void sxgbe_mdio_c22(struct sxgbe_priv_data *sp, u32 cmd, int phyaddr,
+			   int phyreg, u16 phydata)
+{
+	u32 reg;
+
+	writel(1 << phyaddr, sp->ioaddr + SXGBE_MDIO_CLAUSE22_PORT_REG);
+
+	/* set mdio address register */
+	reg = (phyaddr << 16) | (phyreg & 0x1f);
+	writel(reg, sp->ioaddr + sp->hw->mii.addr);
+
+	sxgbe_mdio_ctrl_data(sp, cmd, phydata);
+}
+
+static int sxgbe_mdio_access(struct sxgbe_priv_data *sp, u32 cmd, int phyaddr,
+			     int phyreg, u16 phydata)
+{
+	const struct mii_regs *mii = &sp->hw->mii;
+	int rc;
+
+	rc = sxgbe_mdio_busy_wait(sp->ioaddr, mii->data);
+	if (rc < 0)
+		return rc;
+
+	if (phyreg & MII_ADDR_C45) {
+		sxgbe_mdio_c45(sp, cmd, phyaddr, phyreg, phydata);
+	} else {
+		/* Ports 0-3 only support C22.
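+		 * Callers request a clause-45 access by setting
+		 * MII_ADDR_C45 in phyreg and encoding the MMD device
+		 * in bits 20:16, e.g. (illustrative):
+		 *
+		 *	phyreg = MII_ADDR_C45 | (MDIO_MMD_PMAPMD << 16) | regnum;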
		 */
+		if (phyaddr >= 4)
+			return -ENODEV;
+
+		sxgbe_mdio_c22(sp, cmd, phyaddr, phyreg, phydata);
+	}
+
+	return sxgbe_mdio_busy_wait(sp->ioaddr, mii->data);
+}
+
+/**
+ * sxgbe_mdio_read
+ * @bus: points to the mii_bus structure
+ * @phyaddr: address of the phy port
+ * @phyreg: register address within the phy
+ * Description: this function is used for C45 and C22 MDIO reads
+ */
+static int sxgbe_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg)
+{
+	struct net_device *ndev = bus->priv;
+	struct sxgbe_priv_data *priv = netdev_priv(ndev);
+	int rc;
+
+	rc = sxgbe_mdio_access(priv, SXGBE_SMA_READ_CMD, phyaddr, phyreg, 0);
+	if (rc < 0)
+		return rc;
+
+	return readl(priv->ioaddr + priv->hw->mii.data) & 0xffff;
+}
+
+/**
+ * sxgbe_mdio_write
+ * @bus: points to the mii_bus structure
+ * @phyaddr: address of the phy port
+ * @phyreg: register address within the phy
+ * @phydata: data to be written into the phy register
+ * Description: this function is used for C45 and C22 MDIO writes
+ */
+static int sxgbe_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg,
+			    u16 phydata)
+{
+	struct net_device *ndev = bus->priv;
+	struct sxgbe_priv_data *priv = netdev_priv(ndev);
+
+	return sxgbe_mdio_access(priv, SXGBE_SMA_WRITE_CMD, phyaddr, phyreg,
+				 phydata);
+}
+
+int sxgbe_mdio_register(struct net_device *ndev)
+{
+	struct mii_bus *mdio_bus;
+	struct sxgbe_priv_data *priv = netdev_priv(ndev);
+	struct sxgbe_mdio_bus_data *mdio_data = priv->plat->mdio_bus_data;
+	int err, phy_addr;
+	int *irqlist;
+	bool act;
+
+	/* allocate the new mdio bus */
+	mdio_bus = mdiobus_alloc();
+	if (!mdio_bus) {
+		netdev_err(ndev, "%s: mii bus allocation failed\n", __func__);
+		return -ENOMEM;
+	}
+
+	if (mdio_data->irqs)
+		irqlist = mdio_data->irqs;
+	else
+		irqlist = priv->mii_irq;
+
+	/* assign mii bus fields */
+	mdio_bus->name = "samsxgbe";
+	mdio_bus->read = &sxgbe_mdio_read;
+	mdio_bus->write = &sxgbe_mdio_write;
+	snprintf(mdio_bus->id, MII_BUS_ID_SIZE, "%s-%x",
+		 mdio_bus->name, priv->plat->bus_id);
+	mdio_bus->priv = ndev;
+	mdio_bus->phy_mask = mdio_data->phy_mask;
+	mdio_bus->parent = priv->device;
+
+	/* register with kernel subsystem */
+	err = mdiobus_register(mdio_bus);
+	if (err != 0) {
+		netdev_err(ndev, "mdiobus register failed\n");
+		goto mdiobus_err;
+	}
+
+	for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) {
+		struct phy_device *phy = mdio_bus->phy_map[phy_addr];
+
+		if (phy) {
+			char irq_num[4];
+			char *irq_str;
+			/* If an IRQ was provided to be assigned after
+			 * the bus probe, do it here.
+			 */
+			if ((mdio_data->irqs == NULL) &&
+			    (mdio_data->probed_phy_irq > 0)) {
+				irqlist[phy_addr] = mdio_data->probed_phy_irq;
+				phy->irq = mdio_data->probed_phy_irq;
+			}
+
+			/* If we're going to bind the MAC to this PHY bus,
+			 * and no PHY number was provided to the MAC,
+			 * use the one probed here.
+			 */
+			if (priv->plat->phy_addr == -1)
+				priv->plat->phy_addr = phy_addr;
+
+			act = (priv->plat->phy_addr == phy_addr);
+			switch (phy->irq) {
+			case PHY_POLL:
+				irq_str = "POLL";
+				break;
+			case PHY_IGNORE_INTERRUPT:
+				irq_str = "IGNORE";
+				break;
+			default:
+				sprintf(irq_num, "%d", phy->irq);
+				irq_str = irq_num;
+				break;
+			}
+			netdev_info(ndev, "PHY ID %08x at %d IRQ %s (%s)%s\n",
+				    phy->phy_id, phy_addr, irq_str,
+				    dev_name(&phy->dev), act ?
" active" : "");
+		}
+	}
+
+	priv->mii = mdio_bus;
+
+	return 0;
+
+mdiobus_err:
+	mdiobus_free(mdio_bus);
+	return err;
+}
+
+int sxgbe_mdio_unregister(struct net_device *ndev)
+{
+	struct sxgbe_priv_data *priv = netdev_priv(ndev);
+
+	if (!priv->mii)
+		return 0;
+
+	mdiobus_unregister(priv->mii);
+	priv->mii->priv = NULL;
+	mdiobus_free(priv->mii);
+	priv->mii = NULL;
+
+	return 0;
+}
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_mtl.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_mtl.c
new file mode 100644
index 000000000000..324681c2bb74
--- /dev/null
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_mtl.c
@@ -0,0 +1,254 @@
+/* 10G controller driver for Samsung SoCs
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ *	http://www.samsung.com
+ *
+ * Author: Siva Reddy Kallam <siva.kallam@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/io.h>
+#include <linux/errno.h>
+#include <linux/export.h>
+#include <linux/jiffies.h>
+
+#include "sxgbe_mtl.h"
+#include "sxgbe_reg.h"
+
+static void sxgbe_mtl_init(void __iomem *ioaddr, unsigned int etsalg,
+			   unsigned int raa)
+{
+	u32 reg_val;
+
+	reg_val = readl(ioaddr + SXGBE_MTL_OP_MODE_REG);
+	reg_val &= ETS_RST;
+
+	/* ETS Algorithm */
+	switch (etsalg & SXGBE_MTL_OPMODE_ESTMASK) {
+	case ETS_WRR:
+		reg_val &= ETS_WRR;
+		break;
+	case ETS_WFQ:
+		reg_val |= ETS_WFQ;
+		break;
+	case ETS_DWRR:
+		reg_val |= ETS_DWRR;
+		break;
+	}
+	writel(reg_val, ioaddr + SXGBE_MTL_OP_MODE_REG);
+
+	switch (raa & SXGBE_MTL_OPMODE_RAAMASK) {
+	case RAA_SP:
+		reg_val &= RAA_SP;
+		break;
+	case RAA_WSP:
+		reg_val |= RAA_WSP;
+		break;
+	}
+	writel(reg_val, ioaddr + SXGBE_MTL_OP_MODE_REG);
+}
+
+/* For dynamic DMA channel mapping for Rx queues */
+static void sxgbe_mtl_dma_dm_rxqueue(void __iomem *ioaddr)
+{
+	writel(RX_QUEUE_DYNAMIC, ioaddr + SXGBE_MTL_RXQ_DMAMAP0_REG);
+	writel(RX_QUEUE_DYNAMIC, ioaddr + SXGBE_MTL_RXQ_DMAMAP1_REG);
+	writel(RX_QUEUE_DYNAMIC, ioaddr + SXGBE_MTL_RXQ_DMAMAP2_REG);
+}
+
+static void sxgbe_mtl_set_txfifosize(void __iomem *ioaddr, int queue_num,
+				     int queue_fifo)
+{
+	u32 fifo_bits, reg_val;
+
+	/* 0 means 256 bytes */
+	fifo_bits = (queue_fifo / SXGBE_MTL_TX_FIFO_DIV) - 1;
+	reg_val = readl(ioaddr + SXGBE_MTL_TXQ_OPMODE_REG(queue_num));
+	reg_val |= (fifo_bits << SXGBE_MTL_FIFO_LSHIFT);
+	writel(reg_val, ioaddr + SXGBE_MTL_TXQ_OPMODE_REG(queue_num));
+}
+
+static void sxgbe_mtl_set_rxfifosize(void __iomem *ioaddr, int queue_num,
+				     int queue_fifo)
+{
+	u32 fifo_bits, reg_val;
+
+	/* 0 means 256 bytes */
+	fifo_bits = (queue_fifo / SXGBE_MTL_RX_FIFO_DIV) - 1;
+	reg_val = readl(ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
+	reg_val |= (fifo_bits << SXGBE_MTL_FIFO_LSHIFT);
+	writel(reg_val, ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
+}
+
+static void sxgbe_mtl_enable_txqueue(void __iomem *ioaddr, int queue_num)
+{
+	u32 reg_val;
+
+	reg_val = readl(ioaddr + SXGBE_MTL_TXQ_OPMODE_REG(queue_num));
+	reg_val |= SXGBE_MTL_ENABLE_QUEUE;
+	writel(reg_val, ioaddr + SXGBE_MTL_TXQ_OPMODE_REG(queue_num));
+}
+
+static void sxgbe_mtl_disable_txqueue(void __iomem *ioaddr, int queue_num)
+{
+	u32 reg_val;
+
+	reg_val = readl(ioaddr + SXGBE_MTL_TXQ_OPMODE_REG(queue_num));
+	reg_val &= ~SXGBE_MTL_ENABLE_QUEUE;
+	writel(reg_val, ioaddr + SXGBE_MTL_TXQ_OPMODE_REG(queue_num));
+}
+
+static void sxgbe_mtl_fc_active(void __iomem
*ioaddr, int queue_num, + int threshold) +{ + u32 reg_val; + + reg_val = readl(ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num)); + reg_val &= ~(SXGBE_MTL_FCMASK << RX_FC_ACTIVE); + reg_val |= (threshold << RX_FC_ACTIVE); + + writel(reg_val, ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num)); +} + +static void sxgbe_mtl_fc_enable(void __iomem *ioaddr, int queue_num) +{ + u32 reg_val; + + reg_val = readl(ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num)); + reg_val |= SXGBE_MTL_ENABLE_FC; + writel(reg_val, ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num)); +} + +static void sxgbe_mtl_fc_deactive(void __iomem *ioaddr, int queue_num, + int threshold) +{ + u32 reg_val; + + reg_val = readl(ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num)); + reg_val &= ~(SXGBE_MTL_FCMASK << RX_FC_DEACTIVE); + reg_val |= (threshold << RX_FC_DEACTIVE); + + writel(reg_val, ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num)); +} + +static void sxgbe_mtl_fep_enable(void __iomem *ioaddr, int queue_num) +{ + u32 reg_val; + + reg_val = readl(ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num)); + reg_val |= SXGBE_MTL_RXQ_OP_FEP; + + writel(reg_val, ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num)); +} + +static void sxgbe_mtl_fep_disable(void __iomem *ioaddr, int queue_num) +{ + u32 reg_val; + + reg_val = readl(ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num)); + reg_val &= ~(SXGBE_MTL_RXQ_OP_FEP); + + writel(reg_val, ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num)); +} + +static void sxgbe_mtl_fup_enable(void __iomem *ioaddr, int queue_num) +{ + u32 reg_val; + + reg_val = readl(ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num)); + reg_val |= SXGBE_MTL_RXQ_OP_FUP; + + writel(reg_val, ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num)); +} + +static void sxgbe_mtl_fup_disable(void __iomem *ioaddr, int queue_num) +{ + u32 reg_val; + + reg_val = readl(ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num)); + reg_val &= ~(SXGBE_MTL_RXQ_OP_FUP); + + writel(reg_val, ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num)); +} + + +static void sxgbe_set_tx_mtl_mode(void __iomem *ioaddr, int queue_num, + int tx_mode) +{ + u32 reg_val; + + reg_val = readl(ioaddr + SXGBE_MTL_TXQ_OPMODE_REG(queue_num)); + /* TX specific MTL mode settings */ + if (tx_mode == SXGBE_MTL_SFMODE) { + reg_val |= SXGBE_MTL_SFMODE; + } else { + /* set the TTC values */ + if (tx_mode <= 64) + reg_val |= MTL_CONTROL_TTC_64; + else if (tx_mode <= 96) + reg_val |= MTL_CONTROL_TTC_96; + else if (tx_mode <= 128) + reg_val |= MTL_CONTROL_TTC_128; + else if (tx_mode <= 192) + reg_val |= MTL_CONTROL_TTC_192; + else if (tx_mode <= 256) + reg_val |= MTL_CONTROL_TTC_256; + else if (tx_mode <= 384) + reg_val |= MTL_CONTROL_TTC_384; + else + reg_val |= MTL_CONTROL_TTC_512; + } + + /* write into TXQ operation register */ + writel(reg_val, ioaddr + SXGBE_MTL_TXQ_OPMODE_REG(queue_num)); +} + +static void sxgbe_set_rx_mtl_mode(void __iomem *ioaddr, int queue_num, + int rx_mode) +{ + u32 reg_val; + + reg_val = readl(ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num)); + /* RX specific MTL mode settings */ + if (rx_mode == SXGBE_RX_MTL_SFMODE) { + reg_val |= SXGBE_RX_MTL_SFMODE; + } else { + if (rx_mode <= 64) + reg_val |= MTL_CONTROL_RTC_64; + else if (rx_mode <= 96) + reg_val |= MTL_CONTROL_RTC_96; + else if (rx_mode <= 128) + reg_val |= MTL_CONTROL_RTC_128; + } + + /* write into RXQ operation register */ + writel(reg_val, ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num)); +} + +static const struct sxgbe_mtl_ops mtl_ops = { + .mtl_set_txfifosize = sxgbe_mtl_set_txfifosize, + .mtl_set_rxfifosize = sxgbe_mtl_set_rxfifosize, + .mtl_enable_txqueue = 
sxgbe_mtl_enable_txqueue, + .mtl_disable_txqueue = sxgbe_mtl_disable_txqueue, + .mtl_dynamic_dma_rxqueue = sxgbe_mtl_dma_dm_rxqueue, + .set_tx_mtl_mode = sxgbe_set_tx_mtl_mode, + .set_rx_mtl_mode = sxgbe_set_rx_mtl_mode, + .mtl_init = sxgbe_mtl_init, + .mtl_fc_active = sxgbe_mtl_fc_active, + .mtl_fc_deactive = sxgbe_mtl_fc_deactive, + .mtl_fc_enable = sxgbe_mtl_fc_enable, + .mtl_fep_enable = sxgbe_mtl_fep_enable, + .mtl_fep_disable = sxgbe_mtl_fep_disable, + .mtl_fup_enable = sxgbe_mtl_fup_enable, + .mtl_fup_disable = sxgbe_mtl_fup_disable +}; + +const struct sxgbe_mtl_ops *sxgbe_get_mtl_ops(void) +{ + return &mtl_ops; +} diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_mtl.h b/drivers/net/ethernet/samsung/sxgbe/sxgbe_mtl.h new file mode 100644 index 000000000000..7e4810c4137e --- /dev/null +++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_mtl.h @@ -0,0 +1,104 @@ +/* 10G controller driver for Samsung SoCs + * + * Copyright (C) 2013 Samsung Electronics Co., Ltd. + * http://www.samsung.com + * + * Author: Siva Reddy Kallam <siva.kallam@samsung.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef __SXGBE_MTL_H__ +#define __SXGBE_MTL_H__ + +#define SXGBE_MTL_OPMODE_ESTMASK 0x3 +#define SXGBE_MTL_OPMODE_RAAMASK 0x1 +#define SXGBE_MTL_FCMASK 0x7 +#define SXGBE_MTL_TX_FIFO_DIV 256 +#define SXGBE_MTL_RX_FIFO_DIV 256 + +#define SXGBE_MTL_RXQ_OP_FEP BIT(4) +#define SXGBE_MTL_RXQ_OP_FUP BIT(3) +#define SXGBE_MTL_ENABLE_FC 0x80 + +#define ETS_WRR 0xFFFFFF9F +#define ETS_RST 0xFFFFFF9F +#define ETS_WFQ 0x00000020 +#define ETS_DWRR 0x00000040 +#define RAA_SP 0xFFFFFFFB +#define RAA_WSP 0x00000004 + +#define RX_QUEUE_DYNAMIC 0x80808080 +#define RX_FC_ACTIVE 8 +#define RX_FC_DEACTIVE 13 + +enum ttc_control { + MTL_CONTROL_TTC_64 = 0x00000000, + MTL_CONTROL_TTC_96 = 0x00000020, + MTL_CONTROL_TTC_128 = 0x00000030, + MTL_CONTROL_TTC_192 = 0x00000040, + MTL_CONTROL_TTC_256 = 0x00000050, + MTL_CONTROL_TTC_384 = 0x00000060, + MTL_CONTROL_TTC_512 = 0x00000070, +}; + +enum rtc_control { + MTL_CONTROL_RTC_64 = 0x00000000, + MTL_CONTROL_RTC_96 = 0x00000002, + MTL_CONTROL_RTC_128 = 0x00000003, +}; + +enum flow_control_th { + MTL_FC_FULL_1K = 0x00000000, + MTL_FC_FULL_2K = 0x00000001, + MTL_FC_FULL_4K = 0x00000002, + MTL_FC_FULL_5K = 0x00000003, + MTL_FC_FULL_6K = 0x00000004, + MTL_FC_FULL_8K = 0x00000005, + MTL_FC_FULL_16K = 0x00000006, + MTL_FC_FULL_24K = 0x00000007, +}; + +struct sxgbe_mtl_ops { + void (*mtl_init)(void __iomem *ioaddr, unsigned int etsalg, + unsigned int raa); + + void (*mtl_set_txfifosize)(void __iomem *ioaddr, int queue_num, + int mtl_fifo); + + void (*mtl_set_rxfifosize)(void __iomem *ioaddr, int queue_num, + int queue_fifo); + + void (*mtl_enable_txqueue)(void __iomem *ioaddr, int queue_num); + + void (*mtl_disable_txqueue)(void __iomem *ioaddr, int queue_num); + + void (*set_tx_mtl_mode)(void __iomem *ioaddr, int queue_num, + int tx_mode); + + void (*set_rx_mtl_mode)(void __iomem *ioaddr, int queue_num, + int rx_mode); + + void (*mtl_dynamic_dma_rxqueue)(void __iomem *ioaddr); + + void (*mtl_fc_active)(void __iomem *ioaddr, int queue_num, + int threshold); + + void (*mtl_fc_deactive)(void __iomem *ioaddr, int queue_num, + int threshold); + + void (*mtl_fc_enable)(void __iomem *ioaddr, int queue_num); + + void (*mtl_fep_enable)(void __iomem *ioaddr, int queue_num); + + void (*mtl_fep_disable)(void __iomem *ioaddr, int queue_num); + + 
void (*mtl_fup_enable)(void __iomem *ioaddr, int queue_num); + + void (*mtl_fup_disable)(void __iomem *ioaddr, int queue_num); +}; + +const struct sxgbe_mtl_ops *sxgbe_get_mtl_ops(void); + +#endif /* __SXGBE_MTL_H__ */ diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c new file mode 100644 index 000000000000..b147d469a799 --- /dev/null +++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c @@ -0,0 +1,259 @@ +/* 10G controller driver for Samsung SoCs + * + * Copyright (C) 2013 Samsung Electronics Co., Ltd. + * http://www.samsung.com + * + * Author: Siva Reddy Kallam <siva.kallam@samsung.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/etherdevice.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/netdevice.h> +#include <linux/of.h> +#include <linux/of_irq.h> +#include <linux/of_net.h> +#include <linux/phy.h> +#include <linux/platform_device.h> +#include <linux/sxgbe_platform.h> + +#include "sxgbe_common.h" +#include "sxgbe_reg.h" + +#ifdef CONFIG_OF +static int sxgbe_probe_config_dt(struct platform_device *pdev, + struct sxgbe_plat_data *plat, + const char **mac) +{ + struct device_node *np = pdev->dev.of_node; + struct sxgbe_dma_cfg *dma_cfg; + + if (!np) + return -ENODEV; + + *mac = of_get_mac_address(np); + plat->interface = of_get_phy_mode(np); + + plat->bus_id = of_alias_get_id(np, "ethernet"); + if (plat->bus_id < 0) + plat->bus_id = 0; + + plat->mdio_bus_data = devm_kzalloc(&pdev->dev, + sizeof(*plat->mdio_bus_data), + GFP_KERNEL); + + dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg), GFP_KERNEL); + if (!dma_cfg) + return -ENOMEM; + + plat->dma_cfg = dma_cfg; + of_property_read_u32(np, "samsung,pbl", &dma_cfg->pbl); + if (of_property_read_u32(np, "samsung,burst-map", &dma_cfg->burst_map) == 0) + dma_cfg->fixed_burst = true; + + return 0; +} +#else +static int sxgbe_probe_config_dt(struct platform_device *pdev, + struct sxgbe_plat_data *plat, + const char **mac) +{ + return -ENOSYS; +} +#endif /* CONFIG_OF */ + +/** + * sxgbe_platform_probe + * @pdev: platform device pointer + * Description: platform_device probe function. It allocates + * the necessary resources and invokes the main to init + * the net device, register the mdio bus etc. 
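+ * The device tree is expected to supply interrupts in the order
+ * consumed below: entry 0 is the common MAC irq, followed by one irq
+ * per TX queue, one per RX queue, and finally the LPI irq.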
+ */
+static int sxgbe_platform_probe(struct platform_device *pdev)
+{
+	int ret;
+	int i, chan;
+	struct resource *res;
+	struct device *dev = &pdev->dev;
+	void __iomem *addr;
+	struct sxgbe_priv_data *priv = NULL;
+	struct sxgbe_plat_data *plat_dat = NULL;
+	const char *mac = NULL;
+	struct device_node *node = dev->of_node;
+
+	/* Get memory resource */
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res)
+		goto err_out;
+
+	addr = devm_ioremap_resource(dev, res);
+	if (IS_ERR(addr))
+		return PTR_ERR(addr);
+
+	if (pdev->dev.of_node) {
+		plat_dat = devm_kzalloc(&pdev->dev,
+					sizeof(struct sxgbe_plat_data),
+					GFP_KERNEL);
+		if (!plat_dat)
+			return -ENOMEM;
+
+		ret = sxgbe_probe_config_dt(pdev, plat_dat, &mac);
+		if (ret) {
+			pr_err("%s: main dt probe failed\n", __func__);
+			return ret;
+		}
+	}
+
+	priv = sxgbe_drv_probe(&(pdev->dev), plat_dat, addr);
+	if (!priv) {
+		pr_err("%s: main driver probe failed\n", __func__);
+		goto err_out;
+	}
+
+	/* Get MAC address if available (DT); this must be done after the
+	 * main probe has allocated the net device.
+	 */
+	if (mac)
+		ether_addr_copy(priv->dev->dev_addr, mac);
+
+	/* Get the SXGBE common INT information */
+	priv->irq = irq_of_parse_and_map(node, 0);
+	if (priv->irq <= 0) {
+		dev_err(dev, "sxgbe common irq parsing failed\n");
+		goto err_drv_remove;
+	}
+
+	/* Get the TX/RX IRQ numbers */
+	for (i = 0, chan = 1; i < SXGBE_TX_QUEUES; i++) {
+		priv->txq[i]->irq_no = irq_of_parse_and_map(node, chan++);
+		if (priv->txq[i]->irq_no <= 0) {
+			dev_err(dev, "sxgbe tx irq parsing failed\n");
+			goto err_tx_irq_unmap;
+		}
+	}
+
+	for (i = 0; i < SXGBE_RX_QUEUES; i++) {
+		priv->rxq[i]->irq_no = irq_of_parse_and_map(node, chan++);
+		if (priv->rxq[i]->irq_no <= 0) {
+			dev_err(dev, "sxgbe rx irq parsing failed\n");
+			goto err_rx_irq_unmap;
+		}
+	}
+
+	priv->lpi_irq = irq_of_parse_and_map(node, chan);
+	if (priv->lpi_irq <= 0) {
+		dev_err(dev, "sxgbe lpi irq parsing failed\n");
+		goto err_rx_irq_unmap;
+	}
+
+	platform_set_drvdata(pdev, priv->dev);
+
+	pr_debug("platform driver registration completed\n");
+
+	return 0;
+
+err_rx_irq_unmap:
+	while (i--)
+		irq_dispose_mapping(priv->rxq[i]->irq_no);
+	i = SXGBE_TX_QUEUES;
+err_tx_irq_unmap:
+	while (i--)
+		irq_dispose_mapping(priv->txq[i]->irq_no);
+	irq_dispose_mapping(priv->irq);
+err_drv_remove:
+	sxgbe_drv_remove(priv->dev);
+err_out:
+	return -ENODEV;
+}
+
+/**
+ * sxgbe_platform_remove
+ * @pdev: platform device pointer
+ * Description: this function calls the main to free the net resources
+ * and calls the platform's hook to release the resources (e.g. mem).
+ */ +static int sxgbe_platform_remove(struct platform_device *pdev) +{ + struct net_device *ndev = platform_get_drvdata(pdev); + int ret = sxgbe_drv_remove(ndev); + + return ret; +} + +#ifdef CONFIG_PM +static int sxgbe_platform_suspend(struct device *dev) +{ + struct net_device *ndev = dev_get_drvdata(dev); + + return sxgbe_suspend(ndev); +} + +static int sxgbe_platform_resume(struct device *dev) +{ + struct net_device *ndev = dev_get_drvdata(dev); + + return sxgbe_resume(ndev); +} + +static int sxgbe_platform_freeze(struct device *dev) +{ + struct net_device *ndev = dev_get_drvdata(dev); + + return sxgbe_freeze(ndev); +} + +static int sxgbe_platform_restore(struct device *dev) +{ + struct net_device *ndev = dev_get_drvdata(dev); + + return sxgbe_restore(ndev); +} + +static const struct dev_pm_ops sxgbe_platform_pm_ops = { + .suspend = sxgbe_platform_suspend, + .resume = sxgbe_platform_resume, + .freeze = sxgbe_platform_freeze, + .thaw = sxgbe_platform_restore, + .restore = sxgbe_platform_restore, +}; +#else +static const struct dev_pm_ops sxgbe_platform_pm_ops; +#endif /* CONFIG_PM */ + +static const struct of_device_id sxgbe_dt_ids[] = { + { .compatible = "samsung,sxgbe-v2.0a"}, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, sxgbe_dt_ids); + +static struct platform_driver sxgbe_platform_driver = { + .probe = sxgbe_platform_probe, + .remove = sxgbe_platform_remove, + .driver = { + .name = SXGBE_RESOURCE_NAME, + .owner = THIS_MODULE, + .pm = &sxgbe_platform_pm_ops, + .of_match_table = of_match_ptr(sxgbe_dt_ids), + }, +}; + +int sxgbe_register_platform(void) +{ + int err; + + err = platform_driver_register(&sxgbe_platform_driver); + if (err) + pr_err("failed to register the platform driver\n"); + + return err; +} + +void sxgbe_unregister_platform(void) +{ + platform_driver_unregister(&sxgbe_platform_driver); +} diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_reg.h b/drivers/net/ethernet/samsung/sxgbe/sxgbe_reg.h new file mode 100644 index 000000000000..5a89acb4c505 --- /dev/null +++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_reg.h @@ -0,0 +1,488 @@ +/* 10G controller driver for Samsung SoCs + * + * Copyright (C) 2013 Samsung Electronics Co., Ltd. + * http://www.samsung.com + * + * Author: Siva Reddy Kallam <siva.kallam@samsung.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ +#ifndef __SXGBE_REGMAP_H__ +#define __SXGBE_REGMAP_H__ + +/* SXGBE MAC Registers */ +#define SXGBE_CORE_TX_CONFIG_REG 0x0000 +#define SXGBE_CORE_RX_CONFIG_REG 0x0004 +#define SXGBE_CORE_PKT_FILTER_REG 0x0008 +#define SXGBE_CORE_WATCHDOG_TIMEOUT_REG 0x000C +#define SXGBE_CORE_HASH_TABLE_REG0 0x0010 +#define SXGBE_CORE_HASH_TABLE_REG1 0x0014 +#define SXGBE_CORE_HASH_TABLE_REG2 0x0018 +#define SXGBE_CORE_HASH_TABLE_REG3 0x001C +#define SXGBE_CORE_HASH_TABLE_REG4 0x0020 +#define SXGBE_CORE_HASH_TABLE_REG5 0x0024 +#define SXGBE_CORE_HASH_TABLE_REG6 0x0028 +#define SXGBE_CORE_HASH_TABLE_REG7 0x002C + +/* EEE-LPI Registers */ +#define SXGBE_CORE_LPI_CTRL_STATUS 0x00D0 +#define SXGBE_CORE_LPI_TIMER_CTRL 0x00D4 + +/* VLAN Specific Registers */ +#define SXGBE_CORE_VLAN_TAG_REG 0x0050 +#define SXGBE_CORE_VLAN_HASHTAB_REG 0x0058 +#define SXGBE_CORE_VLAN_INSCTL_REG 0x0060 +#define SXGBE_CORE_VLAN_INNERCTL_REG 0x0064 +#define SXGBE_CORE_RX_ETHTYPE_MATCH_REG 0x006C + +/* Flow Contol Registers */ +#define SXGBE_CORE_TX_Q0_FLOWCTL_REG 0x0070 +#define SXGBE_CORE_TX_Q1_FLOWCTL_REG 0x0074 +#define SXGBE_CORE_TX_Q2_FLOWCTL_REG 0x0078 +#define SXGBE_CORE_TX_Q3_FLOWCTL_REG 0x007C +#define SXGBE_CORE_TX_Q4_FLOWCTL_REG 0x0080 +#define SXGBE_CORE_TX_Q5_FLOWCTL_REG 0x0084 +#define SXGBE_CORE_TX_Q6_FLOWCTL_REG 0x0088 +#define SXGBE_CORE_TX_Q7_FLOWCTL_REG 0x008C +#define SXGBE_CORE_RX_FLOWCTL_REG 0x0090 +#define SXGBE_CORE_RX_CTL0_REG 0x00A0 +#define SXGBE_CORE_RX_CTL1_REG 0x00A4 +#define SXGBE_CORE_RX_CTL2_REG 0x00A8 +#define SXGBE_CORE_RX_CTL3_REG 0x00AC + +/* Interrupt Registers */ +#define SXGBE_CORE_INT_STATUS_REG 0x00B0 +#define SXGBE_CORE_INT_ENABLE_REG 0x00B4 +#define SXGBE_CORE_RXTX_ERR_STATUS_REG 0x00B8 +#define SXGBE_CORE_PMT_CTL_STATUS_REG 0x00C0 +#define SXGBE_CORE_RWK_PKT_FILTER_REG 0x00C4 +#define SXGBE_CORE_VERSION_REG 0x0110 +#define SXGBE_CORE_DEBUG_REG 0x0114 +#define SXGBE_CORE_HW_FEA_REG(index) (0x011C + index * 4) + +/* SMA(MDIO) module registers */ +#define SXGBE_MDIO_SCMD_ADD_REG 0x0200 +#define SXGBE_MDIO_SCMD_DATA_REG 0x0204 +#define SXGBE_MDIO_CCMD_WADD_REG 0x0208 +#define SXGBE_MDIO_CCMD_WDATA_REG 0x020C +#define SXGBE_MDIO_CSCAN_PORT_REG 0x0210 +#define SXGBE_MDIO_INT_STATUS_REG 0x0214 +#define SXGBE_MDIO_INT_ENABLE_REG 0x0218 +#define SXGBE_MDIO_PORT_CONDCON_REG 0x021C +#define SXGBE_MDIO_CLAUSE22_PORT_REG 0x0220 + +/* port specific, addr = 0-3 */ +#define SXGBE_MDIO_DEV_BASE_REG 0x0230 +#define SXGBE_MDIO_PORT_DEV_REG(addr) \ + (SXGBE_MDIO_DEV_BASE_REG + (0x10 * addr) + 0x0) +#define SXGBE_MDIO_PORT_LSTATUS_REG(addr) \ + (SXGBE_MDIO_DEV_BASE_REG + (0x10 * addr) + 0x4) +#define SXGBE_MDIO_PORT_ALIVE_REG(addr) \ + (SXGBE_MDIO_DEV_BASE_REG + (0x10 * addr) + 0x8) + +#define SXGBE_CORE_GPIO_CTL_REG 0x0278 +#define SXGBE_CORE_GPIO_STATUS_REG 0x027C + +/* Address registers for filtering */ +#define SXGBE_CORE_ADD_BASE_REG 0x0300 + +/* addr = 0-31 */ +#define SXGBE_CORE_ADD_HIGHOFFSET(addr) \ + (SXGBE_CORE_ADD_BASE_REG + (0x8 * addr) + 0x0) +#define SXGBE_CORE_ADD_LOWOFFSET(addr) \ + (SXGBE_CORE_ADD_BASE_REG + (0x8 * addr) + 0x4) + +/* SXGBE MMC registers */ +#define SXGBE_MMC_CTL_REG 0x0800 +#define SXGBE_MMC_RXINT_STATUS_REG 0x0804 +#define SXGBE_MMC_TXINT_STATUS_REG 0x0808 +#define SXGBE_MMC_RXINT_ENABLE_REG 0x080C +#define SXGBE_MMC_TXINT_ENABLE_REG 0x0810 + +/* TX specific counters */ +#define SXGBE_MMC_TXOCTETHI_GBCNT_REG 0x0814 +#define SXGBE_MMC_TXOCTETLO_GBCNT_REG 0x0818 +#define SXGBE_MMC_TXFRAMELO_GBCNT_REG 0x081C +#define SXGBE_MMC_TXFRAMEHI_GBCNT_REG 0x0820 +#define 
SXGBE_MMC_TXBROADLO_GCNT_REG 0x0824 +#define SXGBE_MMC_TXBROADHI_GCNT_REG 0x0828 +#define SXGBE_MMC_TXMULTILO_GCNT_REG 0x082C +#define SXGBE_MMC_TXMULTIHI_GCNT_REG 0x0830 +#define SXGBE_MMC_TX64LO_GBCNT_REG 0x0834 +#define SXGBE_MMC_TX64HI_GBCNT_REG 0x0838 +#define SXGBE_MMC_TX65TO127LO_GBCNT_REG 0x083C +#define SXGBE_MMC_TX65TO127HI_GBCNT_REG 0x0840 +#define SXGBE_MMC_TX128TO255LO_GBCNT_REG 0x0844 +#define SXGBE_MMC_TX128TO255HI_GBCNT_REG 0x0848 +#define SXGBE_MMC_TX256TO511LO_GBCNT_REG 0x084C +#define SXGBE_MMC_TX256TO511HI_GBCNT_REG 0x0850 +#define SXGBE_MMC_TX512TO1023LO_GBCNT_REG 0x0854 +#define SXGBE_MMC_TX512TO1023HI_GBCNT_REG 0x0858 +#define SXGBE_MMC_TX1023TOMAXLO_GBCNT_REG 0x085C +#define SXGBE_MMC_TX1023TOMAXHI_GBCNT_REG 0x0860 +#define SXGBE_MMC_TXUNICASTLO_GBCNT_REG 0x0864 +#define SXGBE_MMC_TXUNICASTHI_GBCNT_REG 0x0868 +#define SXGBE_MMC_TXMULTILO_GBCNT_REG 0x086C +#define SXGBE_MMC_TXMULTIHI_GBCNT_REG 0x0870 +#define SXGBE_MMC_TXBROADLO_GBCNT_REG 0x0874 +#define SXGBE_MMC_TXBROADHI_GBCNT_REG 0x0878 +#define SXGBE_MMC_TXUFLWLO_GBCNT_REG 0x087C +#define SXGBE_MMC_TXUFLWHI_GBCNT_REG 0x0880 +#define SXGBE_MMC_TXOCTETLO_GCNT_REG 0x0884 +#define SXGBE_MMC_TXOCTETHI_GCNT_REG 0x0888 +#define SXGBE_MMC_TXFRAMELO_GCNT_REG 0x088C +#define SXGBE_MMC_TXFRAMEHI_GCNT_REG 0x0890 +#define SXGBE_MMC_TXPAUSELO_CNT_REG 0x0894 +#define SXGBE_MMC_TXPAUSEHI_CNT_REG 0x0898 +#define SXGBE_MMC_TXVLANLO_GCNT_REG 0x089C +#define SXGBE_MMC_TXVLANHI_GCNT_REG 0x08A0 + +/* RX specific counters */ +#define SXGBE_MMC_RXFRAMELO_GBCNT_REG 0x0900 +#define SXGBE_MMC_RXFRAMEHI_GBCNT_REG 0x0904 +#define SXGBE_MMC_RXOCTETLO_GBCNT_REG 0x0908 +#define SXGBE_MMC_RXOCTETHI_GBCNT_REG 0x090C +#define SXGBE_MMC_RXOCTETLO_GCNT_REG 0x0910 +#define SXGBE_MMC_RXOCTETHI_GCNT_REG 0x0914 +#define SXGBE_MMC_RXBROADLO_GCNT_REG 0x0918 +#define SXGBE_MMC_RXBROADHI_GCNT_REG 0x091C +#define SXGBE_MMC_RXMULTILO_GCNT_REG 0x0920 +#define SXGBE_MMC_RXMULTIHI_GCNT_REG 0x0924 +#define SXGBE_MMC_RXCRCERRLO_REG 0x0928 +#define SXGBE_MMC_RXCRCERRHI_REG 0x092C +#define SXGBE_MMC_RXSHORT64BFRAME_ERR_REG 0x0930 +#define SXGBE_MMC_RXJABBERERR_REG 0x0934 +#define SXGBE_MMC_RXSHORT64BFRAME_COR_REG 0x0938 +#define SXGBE_MMC_RXOVERMAXFRAME_COR_REG 0x093C +#define SXGBE_MMC_RX64LO_GBCNT_REG 0x0940 +#define SXGBE_MMC_RX64HI_GBCNT_REG 0x0944 +#define SXGBE_MMC_RX65TO127LO_GBCNT_REG 0x0948 +#define SXGBE_MMC_RX65TO127HI_GBCNT_REG 0x094C +#define SXGBE_MMC_RX128TO255LO_GBCNT_REG 0x0950 +#define SXGBE_MMC_RX128TO255HI_GBCNT_REG 0x0954 +#define SXGBE_MMC_RX256TO511LO_GBCNT_REG 0x0958 +#define SXGBE_MMC_RX256TO511HI_GBCNT_REG 0x095C +#define SXGBE_MMC_RX512TO1023LO_GBCNT_REG 0x0960 +#define SXGBE_MMC_RX512TO1023HI_GBCNT_REG 0x0964 +#define SXGBE_MMC_RX1023TOMAXLO_GBCNT_REG 0x0968 +#define SXGBE_MMC_RX1023TOMAXHI_GBCNT_REG 0x096C +#define SXGBE_MMC_RXUNICASTLO_GCNT_REG 0x0970 +#define SXGBE_MMC_RXUNICASTHI_GCNT_REG 0x0974 +#define SXGBE_MMC_RXLENERRLO_REG 0x0978 +#define SXGBE_MMC_RXLENERRHI_REG 0x097C +#define SXGBE_MMC_RXOUTOFRANGETYPELO_REG 0x0980 +#define SXGBE_MMC_RXOUTOFRANGETYPEHI_REG 0x0984 +#define SXGBE_MMC_RXPAUSELO_CNT_REG 0x0988 +#define SXGBE_MMC_RXPAUSEHI_CNT_REG 0x098C +#define SXGBE_MMC_RXFIFOOVERFLOWLO_GBCNT_REG 0x0990 +#define SXGBE_MMC_RXFIFOOVERFLOWHI_GBCNT_REG 0x0994 +#define SXGBE_MMC_RXVLANLO_GBCNT_REG 0x0998 +#define SXGBE_MMC_RXVLANHI_GBCNT_REG 0x099C +#define SXGBE_MMC_RXWATCHDOG_ERR_REG 0x09A0 + +/* L3/L4 function registers */ +#define SXGBE_CORE_L34_ADDCTL_REG 0x0C00 +#define SXGBE_CORE_L34_ADDCTL_REG 0x0C00 +#define 
SXGBE_CORE_L34_DATA_REG 0x0C04 + +/* ARP registers */ +#define SXGBE_CORE_ARP_ADD_REG 0x0C10 + +/* RSS registers */ +#define SXGBE_CORE_RSS_CTL_REG 0x0C80 +#define SXGBE_CORE_RSS_ADD_REG 0x0C88 +#define SXGBE_CORE_RSS_DATA_REG 0x0C8C + +/* RSS control register bits */ +#define SXGBE_CORE_RSS_CTL_UDP4TE BIT(3) +#define SXGBE_CORE_RSS_CTL_TCP4TE BIT(2) +#define SXGBE_CORE_RSS_CTL_IP2TE BIT(1) +#define SXGBE_CORE_RSS_CTL_RSSE BIT(0) + +/* IEEE 1588 registers */ +#define SXGBE_CORE_TSTAMP_CTL_REG 0x0D00 +#define SXGBE_CORE_SUBSEC_INC_REG 0x0D04 +#define SXGBE_CORE_SYSTIME_SEC_REG 0x0D0C +#define SXGBE_CORE_SYSTIME_NSEC_REG 0x0D10 +#define SXGBE_CORE_SYSTIME_SECUP_REG 0x0D14 +#define SXGBE_CORE_TSTAMP_ADD_REG 0x0D18 +#define SXGBE_CORE_SYSTIME_HWORD_REG 0x0D1C +#define SXGBE_CORE_TSTAMP_STATUS_REG 0x0D20 +#define SXGBE_CORE_TXTIME_STATUSNSEC_REG 0x0D30 +#define SXGBE_CORE_TXTIME_STATUSSEC_REG 0x0D34 + +/* Auxiliary registers */ +#define SXGBE_CORE_AUX_CTL_REG 0x0D40 +#define SXGBE_CORE_AUX_TSTAMP_NSEC_REG 0x0D48 +#define SXGBE_CORE_AUX_TSTAMP_SEC_REG 0x0D4C +#define SXGBE_CORE_AUX_TSTAMP_INGCOR_REG 0x0D50 +#define SXGBE_CORE_AUX_TSTAMP_ENGCOR_REG 0x0D54 +#define SXGBE_CORE_AUX_TSTAMP_INGCOR_NSEC_REG 0x0D58 +#define SXGBE_CORE_AUX_TSTAMP_INGCOR_SUBNSEC_REG 0x0D5C +#define SXGBE_CORE_AUX_TSTAMP_ENGCOR_NSEC_REG 0x0D60 +#define SXGBE_CORE_AUX_TSTAMP_ENGCOR_SUBNSEC_REG 0x0D64 + +/* PPS registers */ +#define SXGBE_CORE_PPS_CTL_REG 0x0D70 +#define SXGBE_CORE_PPS_BASE 0x0D80 + +/* addr = 0 - 3 */ +#define SXGBE_CORE_PPS_TTIME_SEC_REG(addr) \ + (SXGBE_CORE_PPS_BASE + (0x10 * addr) + 0x0) +#define SXGBE_CORE_PPS_TTIME_NSEC_REG(addr) \ + (SXGBE_CORE_PPS_BASE + (0x10 * addr) + 0x4) +#define SXGBE_CORE_PPS_INTERVAL_REG(addr) \ + (SXGBE_CORE_PPS_BASE + (0x10 * addr) + 0x8) +#define SXGBE_CORE_PPS_WIDTH_REG(addr) \ + (SXGBE_CORE_PPS_BASE + (0x10 * addr) + 0xC) +#define SXGBE_CORE_PTO_CTL_REG 0x0DC0 +#define SXGBE_CORE_SRCPORT_ITY0_REG 0x0DC4 +#define SXGBE_CORE_SRCPORT_ITY1_REG 0x0DC8 +#define SXGBE_CORE_SRCPORT_ITY2_REG 0x0DCC +#define SXGBE_CORE_LOGMSG_LEVEL_REG 0x0DD0 + +/* SXGBE MTL Registers */ +#define SXGBE_MTL_BASE_REG 0x1000 +#define SXGBE_MTL_OP_MODE_REG (SXGBE_MTL_BASE_REG + 0x0000) +#define SXGBE_MTL_DEBUG_CTL_REG (SXGBE_MTL_BASE_REG + 0x0008) +#define SXGBE_MTL_DEBUG_STATUS_REG (SXGBE_MTL_BASE_REG + 0x000C) +#define SXGBE_MTL_FIFO_DEBUGDATA_REG (SXGBE_MTL_BASE_REG + 0x0010) +#define SXGBE_MTL_INT_STATUS_REG (SXGBE_MTL_BASE_REG + 0x0020) +#define SXGBE_MTL_RXQ_DMAMAP0_REG (SXGBE_MTL_BASE_REG + 0x0030) +#define SXGBE_MTL_RXQ_DMAMAP1_REG (SXGBE_MTL_BASE_REG + 0x0034) +#define SXGBE_MTL_RXQ_DMAMAP2_REG (SXGBE_MTL_BASE_REG + 0x0038) +#define SXGBE_MTL_TX_PRTYMAP0_REG (SXGBE_MTL_BASE_REG + 0x0040) +#define SXGBE_MTL_TX_PRTYMAP1_REG (SXGBE_MTL_BASE_REG + 0x0044) + +/* TC/Queue registers, qnum=0-15 */ +#define SXGBE_MTL_TC_TXBASE_REG (SXGBE_MTL_BASE_REG + 0x0100) +#define SXGBE_MTL_TXQ_OPMODE_REG(qnum) \ + (SXGBE_MTL_TC_TXBASE_REG + (qnum * 0x80) + 0x00) +#define SXGBE_MTL_SFMODE BIT(1) +#define SXGBE_MTL_FIFO_LSHIFT 16 +#define SXGBE_MTL_ENABLE_QUEUE 0x00000008 +#define SXGBE_MTL_TXQ_UNDERFLOW_REG(qnum) \ + (SXGBE_MTL_TC_TXBASE_REG + (qnum * 0x80) + 0x04) +#define SXGBE_MTL_TXQ_DEBUG_REG(qnum) \ + (SXGBE_MTL_TC_TXBASE_REG + (qnum * 0x80) + 0x08) +#define SXGBE_MTL_TXQ_ETSCTL_REG(qnum) \ + (SXGBE_MTL_TC_TXBASE_REG + (qnum * 0x80) + 0x10) +#define SXGBE_MTL_TXQ_ETSSTATUS_REG(qnum) \ + (SXGBE_MTL_TC_TXBASE_REG + (qnum * 0x80) + 0x14) +#define SXGBE_MTL_TXQ_QUANTWEIGHT_REG(qnum) \ + (SXGBE_MTL_TC_TXBASE_REG 
+ (qnum * 0x80) + 0x18) + +#define SXGBE_MTL_TC_RXBASE_REG 0x1140 +#define SXGBE_RX_MTL_SFMODE BIT(5) +#define SXGBE_MTL_RXQ_OPMODE_REG(qnum) \ + (SXGBE_MTL_TC_RXBASE_REG + (qnum * 0x80) + 0x00) +#define SXGBE_MTL_RXQ_MISPKTOVERFLOW_REG(qnum) \ + (SXGBE_MTL_TC_RXBASE_REG + (qnum * 0x80) + 0x04) +#define SXGBE_MTL_RXQ_DEBUG_REG(qnum) \ + (SXGBE_MTL_TC_RXBASE_REG + (qnum * 0x80) + 0x08) +#define SXGBE_MTL_RXQ_CTL_REG(qnum) \ + (SXGBE_MTL_TC_RXBASE_REG + (qnum * 0x80) + 0x0C) +#define SXGBE_MTL_RXQ_INTENABLE_REG(qnum) \ + (SXGBE_MTL_TC_RXBASE_REG + (qnum * 0x80) + 0x30) +#define SXGBE_MTL_RXQ_INTSTATUS_REG(qnum) \ + (SXGBE_MTL_TC_RXBASE_REG + (qnum * 0x80) + 0x34) + +/* SXGBE DMA Registers */ +#define SXGBE_DMA_BASE_REG 0x3000 +#define SXGBE_DMA_MODE_REG (SXGBE_DMA_BASE_REG + 0x0000) +#define SXGBE_DMA_SOFT_RESET BIT(0) +#define SXGBE_DMA_SYSBUS_MODE_REG (SXGBE_DMA_BASE_REG + 0x0004) +#define SXGBE_DMA_AXI_UNDEF_BURST BIT(0) +#define SXGBE_DMA_ENHACE_ADDR_MODE BIT(11) +#define SXGBE_DMA_INT_STATUS_REG (SXGBE_DMA_BASE_REG + 0x0008) +#define SXGBE_DMA_AXI_ARCACHECTL_REG (SXGBE_DMA_BASE_REG + 0x0010) +#define SXGBE_DMA_AXI_AWCACHECTL_REG (SXGBE_DMA_BASE_REG + 0x0018) +#define SXGBE_DMA_DEBUG_STATUS0_REG (SXGBE_DMA_BASE_REG + 0x0020) +#define SXGBE_DMA_DEBUG_STATUS1_REG (SXGBE_DMA_BASE_REG + 0x0024) +#define SXGBE_DMA_DEBUG_STATUS2_REG (SXGBE_DMA_BASE_REG + 0x0028) +#define SXGBE_DMA_DEBUG_STATUS3_REG (SXGBE_DMA_BASE_REG + 0x002C) +#define SXGBE_DMA_DEBUG_STATUS4_REG (SXGBE_DMA_BASE_REG + 0x0030) +#define SXGBE_DMA_DEBUG_STATUS5_REG (SXGBE_DMA_BASE_REG + 0x0034) + +/* Channel Registers, cha_num = 0-15 */ +#define SXGBE_DMA_CHA_BASE_REG \ + (SXGBE_DMA_BASE_REG + 0x0100) +#define SXGBE_DMA_CHA_CTL_REG(cha_num) \ + (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x00) +#define SXGBE_DMA_PBL_X8MODE BIT(16) +#define SXGBE_DMA_CHA_TXCTL_TSE_ENABLE BIT(12) +#define SXGBE_DMA_CHA_TXCTL_REG(cha_num) \ + (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x04) +#define SXGBE_DMA_CHA_RXCTL_REG(cha_num) \ + (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x08) +#define SXGBE_DMA_CHA_TXDESC_HADD_REG(cha_num) \ + (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x10) +#define SXGBE_DMA_CHA_TXDESC_LADD_REG(cha_num) \ + (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x14) +#define SXGBE_DMA_CHA_RXDESC_HADD_REG(cha_num) \ + (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x18) +#define SXGBE_DMA_CHA_RXDESC_LADD_REG(cha_num) \ + (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x1C) +#define SXGBE_DMA_CHA_TXDESC_TAILPTR_REG(cha_num) \ + (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x24) +#define SXGBE_DMA_CHA_RXDESC_TAILPTR_REG(cha_num) \ + (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x2C) +#define SXGBE_DMA_CHA_TXDESC_RINGLEN_REG(cha_num) \ + (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x30) +#define SXGBE_DMA_CHA_RXDESC_RINGLEN_REG(cha_num) \ + (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x34) +#define SXGBE_DMA_CHA_INT_ENABLE_REG(cha_num) \ + (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x38) +#define SXGBE_DMA_CHA_INT_RXWATCHTMR_REG(cha_num) \ + (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x3C) +#define SXGBE_DMA_CHA_TXDESC_CURADDLO_REG(cha_num) \ + (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x44) +#define SXGBE_DMA_CHA_RXDESC_CURADDLO_REG(cha_num) \ + (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x4C) +#define SXGBE_DMA_CHA_CURTXBUF_ADDHI_REG(cha_num) \ + (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x50) +#define SXGBE_DMA_CHA_CURTXBUF_ADDLO_REG(cha_num) \ + (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x54) +#define 
SXGBE_DMA_CHA_CURRXBUF_ADDHI_REG(cha_num) \ + (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x58) +#define SXGBE_DMA_CHA_CURRXBUF_ADDLO_REG(cha_num) \ + (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x5C) +#define SXGBE_DMA_CHA_STATUS_REG(cha_num) \ + (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x60) + +/* TX DMA control register specific */ +#define SXGBE_TX_START_DMA BIT(0) + +/* sxgbe tx configuration register bitfields */ +#define SXGBE_SPEED_10G 0x0 +#define SXGBE_SPEED_2_5G 0x1 +#define SXGBE_SPEED_1G 0x2 +#define SXGBE_SPEED_LSHIFT 29 + +#define SXGBE_TX_ENABLE BIT(0) +#define SXGBE_TX_DISDIC_ALGO BIT(1) +#define SXGBE_TX_JABBER_DISABLE BIT(16) + +/* sxgbe rx configuration register bitfields */ +#define SXGBE_RX_ENABLE BIT(0) +#define SXGBE_RX_ACS_ENABLE BIT(1) +#define SXGBE_RX_WATCHDOG_DISABLE BIT(7) +#define SXGBE_RX_JUMBPKT_ENABLE BIT(8) +#define SXGBE_RX_CSUMOFFLOAD_ENABLE BIT(9) +#define SXGBE_RX_LOOPBACK_ENABLE BIT(10) +#define SXGBE_RX_ARPOFFLOAD_ENABLE BIT(31) + +/* sxgbe vlan Tag Register bitfields */ +#define SXGBE_VLAN_SVLAN_ENABLE BIT(18) +#define SXGBE_VLAN_DOUBLEVLAN_ENABLE BIT(26) +#define SXGBE_VLAN_INNERVLAN_ENABLE BIT(27) + +/* XMAC VLAN Tag Inclusion Register(0x0060) bitfields + * Below fields same for Inner VLAN Tag Inclusion + * Register(0x0064) register + */ +enum vlan_tag_ctl_tx { + VLAN_TAG_TX_NOP, + VLAN_TAG_TX_DEL, + VLAN_TAG_TX_INSERT, + VLAN_TAG_TX_REPLACE +}; +#define SXGBE_VLAN_PRTY_CTL BIT(18) +#define SXGBE_VLAN_CSVL_CTL BIT(19) + +/* SXGBE TX Q Flow Control Register bitfields */ +#define SXGBE_TX_FLOW_CTL_FCB BIT(0) +#define SXGBE_TX_FLOW_CTL_TFB BIT(1) + +/* SXGBE RX Q Flow Control Register bitfields */ +#define SXGBE_RX_FLOW_CTL_ENABLE BIT(0) +#define SXGBE_RX_UNICAST_DETECT BIT(1) +#define SXGBE_RX_PRTYFLOW_CTL_ENABLE BIT(8) + +/* sxgbe rx Q control0 register bitfields */ +#define SXGBE_RX_Q_ENABLE 0x2 + +/* SXGBE hardware features bitfield specific */ +/* Capability Register 0 */ +#define SXGBE_HW_FEAT_GMII(cap) ((cap & 0x00000002) >> 1) +#define SXGBE_HW_FEAT_VLAN_HASH_FILTER(cap) ((cap & 0x00000010) >> 4) +#define SXGBE_HW_FEAT_SMA(cap) ((cap & 0x00000020) >> 5) +#define SXGBE_HW_FEAT_PMT_TEMOTE_WOP(cap) ((cap & 0x00000040) >> 6) +#define SXGBE_HW_FEAT_PMT_MAGIC_PKT(cap) ((cap & 0x00000080) >> 7) +#define SXGBE_HW_FEAT_RMON(cap) ((cap & 0x00000100) >> 8) +#define SXGBE_HW_FEAT_ARP_OFFLOAD(cap) ((cap & 0x00000200) >> 9) +#define SXGBE_HW_FEAT_IEEE1500_2008(cap) ((cap & 0x00001000) >> 12) +#define SXGBE_HW_FEAT_EEE(cap) ((cap & 0x00002000) >> 13) +#define SXGBE_HW_FEAT_TX_CSUM_OFFLOAD(cap) ((cap & 0x00004000) >> 14) +#define SXGBE_HW_FEAT_RX_CSUM_OFFLOAD(cap) ((cap & 0x00010000) >> 16) +#define SXGBE_HW_FEAT_MACADDR_COUNT(cap) ((cap & 0x007C0000) >> 18) +#define SXGBE_HW_FEAT_TSTMAP_SRC(cap) ((cap & 0x06000000) >> 25) +#define SXGBE_HW_FEAT_SRCADDR_VLAN(cap) ((cap & 0x08000000) >> 27) + +/* Capability Register 1 */ +#define SXGBE_HW_FEAT_RX_FIFO_SIZE(cap) ((cap & 0x0000001F)) +#define SXGBE_HW_FEAT_TX_FIFO_SIZE(cap) ((cap & 0x000007C0) >> 6) +#define SXGBE_HW_FEAT_IEEE1588_HWORD(cap) ((cap & 0x00002000) >> 13) +#define SXGBE_HW_FEAT_DCB(cap) ((cap & 0x00010000) >> 16) +#define SXGBE_HW_FEAT_SPLIT_HDR(cap) ((cap & 0x00020000) >> 17) +#define SXGBE_HW_FEAT_TSO(cap) ((cap & 0x00040000) >> 18) +#define SXGBE_HW_FEAT_DEBUG_MEM_IFACE(cap) ((cap & 0x00080000) >> 19) +#define SXGBE_HW_FEAT_RSS(cap) ((cap & 0x00100000) >> 20) +#define SXGBE_HW_FEAT_HASH_TABLE_SIZE(cap) ((cap & 0x03000000) >> 24) +#define SXGBE_HW_FEAT_L3L4_FILTER_NUM(cap) ((cap & 
0x78000000) >> 27)
+
+/* Capability Register 2 */
+#define SXGBE_HW_FEAT_RX_MTL_QUEUES(cap)	((cap & 0x0000000F))
+#define SXGBE_HW_FEAT_TX_MTL_QUEUES(cap)	((cap & 0x000003C0) >> 6)
+#define SXGBE_HW_FEAT_RX_DMA_CHANNELS(cap)	((cap & 0x0000F000) >> 12)
+#define SXGBE_HW_FEAT_TX_DMA_CHANNELS(cap)	((cap & 0x003C0000) >> 18)
+#define SXGBE_HW_FEAT_PPS_OUTPUTS(cap)		((cap & 0x07000000) >> 24)
+#define SXGBE_HW_FEAT_AUX_SNAPSHOTS(cap)	((cap & 0x70000000) >> 28)
+
+/* DMA channel interrupt enable specific */
+/* DMA Normal interrupt */
+#define SXGBE_DMA_INT_ENA_NIE	BIT(16)	/* Normal Summary */
+#define SXGBE_DMA_INT_ENA_TIE	BIT(0)	/* Transmit Interrupt */
+#define SXGBE_DMA_INT_ENA_TUE	BIT(2)	/* Transmit Buffer Unavailable */
+#define SXGBE_DMA_INT_ENA_RIE	BIT(6)	/* Receive Interrupt */
+
+#define SXGBE_DMA_INT_NORMAL	\
+	(SXGBE_DMA_INT_ENA_NIE | SXGBE_DMA_INT_ENA_RIE |	\
+	 SXGBE_DMA_INT_ENA_TIE | SXGBE_DMA_INT_ENA_TUE)
+
+/* DMA Abnormal interrupt */
+#define SXGBE_DMA_INT_ENA_AIE	BIT(15)	/* Abnormal Summary */
+#define SXGBE_DMA_INT_ENA_TSE	BIT(1)	/* Transmit Stopped */
+#define SXGBE_DMA_INT_ENA_RUE	BIT(7)	/* Receive Buffer Unavailable */
+#define SXGBE_DMA_INT_ENA_RSE	BIT(8)	/* Receive Stopped */
+#define SXGBE_DMA_INT_ENA_FBE	BIT(12)	/* Fatal Bus Error */
+#define SXGBE_DMA_INT_ENA_CDEE	BIT(13)	/* Context Descriptor Error */
+
+#define SXGBE_DMA_INT_ABNORMAL	\
+	(SXGBE_DMA_INT_ENA_AIE | SXGBE_DMA_INT_ENA_TSE |	\
+	 SXGBE_DMA_INT_ENA_RUE | SXGBE_DMA_INT_ENA_RSE |	\
+	 SXGBE_DMA_INT_ENA_FBE | SXGBE_DMA_INT_ENA_CDEE)
+
+#define SXGBE_DMA_ENA_INT	(SXGBE_DMA_INT_NORMAL | SXGBE_DMA_INT_ABNORMAL)
+
+/* DMA channel interrupt status specific */
+#define SXGBE_DMA_INT_STATUS_REB2	BIT(21)
+#define SXGBE_DMA_INT_STATUS_REB1	BIT(20)
+#define SXGBE_DMA_INT_STATUS_REB0	BIT(19)
+#define SXGBE_DMA_INT_STATUS_TEB2	BIT(18)
+#define SXGBE_DMA_INT_STATUS_TEB1	BIT(17)
+#define SXGBE_DMA_INT_STATUS_TEB0	BIT(16)
+#define SXGBE_DMA_INT_STATUS_NIS	BIT(15)
+#define SXGBE_DMA_INT_STATUS_AIS	BIT(14)
+#define SXGBE_DMA_INT_STATUS_CTXTERR	BIT(13)
+#define SXGBE_DMA_INT_STATUS_FBE	BIT(12)
+#define SXGBE_DMA_INT_STATUS_RPS	BIT(8)
+#define SXGBE_DMA_INT_STATUS_RBU	BIT(7)
+#define SXGBE_DMA_INT_STATUS_RI		BIT(6)
+#define SXGBE_DMA_INT_STATUS_TBU	BIT(2)
+#define SXGBE_DMA_INT_STATUS_TPS	BIT(1)
+#define SXGBE_DMA_INT_STATUS_TI		BIT(0)
+
+#endif /* __SXGBE_REGMAP_H__ */
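A usage sketch for the DMA interrupt macros above: arm a channel with the composite SXGBE_DMA_ENA_INT mask, then test the per-channel status register in the handler. The helper names are illustrative and the write-one-to-clear behaviour is an assumption, not something this patch states:

	static void sxgbe_dma_arm_channel(void __iomem *ioaddr, int cha)
	{
		/* enable the normal (TI/RI/TBU) and abnormal summaries at once */
		writel(SXGBE_DMA_ENA_INT,
		       ioaddr + SXGBE_DMA_CHA_INT_ENABLE_REG(cha));
	}

	static void sxgbe_dma_service_channel(void __iomem *ioaddr, int cha)
	{
		u32 status = readl(ioaddr + SXGBE_DMA_CHA_STATUS_REG(cha));

		if (status & SXGBE_DMA_INT_STATUS_RI)
			;	/* schedule the RX NAPI poll */
		if (status & SXGBE_DMA_INT_STATUS_FBE)
			;	/* fatal bus error: stop and restart the channel */

		/* assumed write-one-to-clear status semantics */
		writel(status, ioaddr + SXGBE_DMA_CHA_STATUS_REG(cha));
	}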
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_xpcs.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_xpcs.c
new file mode 100644
index 000000000000..51c32194ba88
--- /dev/null
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_xpcs.c
@@ -0,0 +1,91 @@
+/* 10G controller driver for Samsung SoCs
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ *		http://www.samsung.com
+ *
+ * Author: Siva Reddy Kallam <siva.kallam@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/bitops.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/phy.h>
+#include "sxgbe_common.h"
+#include "sxgbe_xpcs.h"
+
+static int sxgbe_xpcs_read(struct net_device *ndev, unsigned int reg)
+{
+	u32 value;
+	struct sxgbe_priv_data *priv = netdev_priv(ndev);
+
+	value = readl(priv->ioaddr + XPCS_OFFSET + reg);
+
+	return value;
+}
+
+static int sxgbe_xpcs_write(struct net_device *ndev, int reg, int data)
+{
+	struct sxgbe_priv_data *priv = netdev_priv(ndev);
+
+	writel(data, priv->ioaddr + XPCS_OFFSET + reg);
+
+	return 0;
+}
+
+int sxgbe_xpcs_init(struct net_device *ndev)
+{
+	u32 value;
+
+	value = sxgbe_xpcs_read(ndev, SR_PCS_MMD_CONTROL1);
+	/* 10G XAUI mode */
+	sxgbe_xpcs_write(ndev, SR_PCS_CONTROL2, XPCS_TYPE_SEL_X);
+	sxgbe_xpcs_write(ndev, VR_PCS_MMD_XAUI_MODE_CONTROL, XPCS_XAUI_MODE);
+	sxgbe_xpcs_write(ndev, VR_PCS_MMD_XAUI_MODE_CONTROL, value | BIT(13));
+	sxgbe_xpcs_write(ndev, SR_PCS_MMD_CONTROL1, value | BIT(11));
+
+	do {
+		value = sxgbe_xpcs_read(ndev, VR_PCS_MMD_DIGITAL_STATUS);
+	} while ((value & XPCS_QSEQ_STATE_MPLLOFF) == XPCS_QSEQ_STATE_STABLE);
+
+	value = sxgbe_xpcs_read(ndev, SR_PCS_MMD_CONTROL1);
+	sxgbe_xpcs_write(ndev, SR_PCS_MMD_CONTROL1, value & ~BIT(11));
+
+	do {
+		value = sxgbe_xpcs_read(ndev, VR_PCS_MMD_DIGITAL_STATUS);
+	} while ((value & XPCS_QSEQ_STATE_MPLLOFF) != XPCS_QSEQ_STATE_STABLE);
+
+	return 0;
+}
+
+int sxgbe_xpcs_init_1G(struct net_device *ndev)
+{
+	int value;
+
+	/* 10GBASE-X PCS (1G) mode */
+	sxgbe_xpcs_write(ndev, SR_PCS_CONTROL2, XPCS_TYPE_SEL_X);
+	sxgbe_xpcs_write(ndev, VR_PCS_MMD_XAUI_MODE_CONTROL, XPCS_XAUI_MODE);
+	value = sxgbe_xpcs_read(ndev, SR_PCS_MMD_CONTROL1);
+	sxgbe_xpcs_write(ndev, SR_PCS_MMD_CONTROL1, value & ~BIT(13));
+
+	value = sxgbe_xpcs_read(ndev, SR_MII_MMD_CONTROL);
+	sxgbe_xpcs_write(ndev, SR_MII_MMD_CONTROL, value | BIT(6));
+	sxgbe_xpcs_write(ndev, SR_MII_MMD_CONTROL, value & ~BIT(13));
+	value = sxgbe_xpcs_read(ndev, SR_PCS_MMD_CONTROL1);
+	sxgbe_xpcs_write(ndev, SR_PCS_MMD_CONTROL1, value | BIT(11));
+
+	do {
+		value = sxgbe_xpcs_read(ndev, VR_PCS_MMD_DIGITAL_STATUS);
+	} while ((value & XPCS_QSEQ_STATE_MPLLOFF) != XPCS_QSEQ_STATE_STABLE);
+
+	value = sxgbe_xpcs_read(ndev, SR_PCS_MMD_CONTROL1);
+	sxgbe_xpcs_write(ndev, SR_PCS_MMD_CONTROL1, value & ~BIT(11));
+
+	/* Auto-Negotiation clause 37 enable */
+	value = sxgbe_xpcs_read(ndev, SR_MII_MMD_CONTROL);
+	sxgbe_xpcs_write(ndev, SR_MII_MMD_CONTROL, value | BIT(12));
+
+	return 0;
+}
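Both init paths above spin unbounded on VR_PCS_MMD_DIGITAL_STATUS, so a PLL that never settles would hang the caller. A bounded variant of the poll, as a sketch only (not part of this commit; the retry count and delay are arbitrary, and <linux/delay.h> would be needed):

	static int sxgbe_xpcs_wait_state(struct net_device *ndev, bool want_stable)
	{
		unsigned int retries = 1000;	/* arbitrary bound */
		u32 value;

		do {
			value = sxgbe_xpcs_read(ndev, VR_PCS_MMD_DIGITAL_STATUS);
			if (((value & XPCS_QSEQ_STATE_MPLLOFF) ==
			     XPCS_QSEQ_STATE_STABLE) == want_stable)
				return 0;
			usleep_range(10, 20);
		} while (--retries);

		return -ETIMEDOUT;
	}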
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_xpcs.h b/drivers/net/ethernet/samsung/sxgbe/sxgbe_xpcs.h
new file mode 100644
index 000000000000..6b26a50724d3
--- /dev/null
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_xpcs.h
@@ -0,0 +1,38 @@
+/* 10G controller driver for Samsung SoCs
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ *		http://www.samsung.com
+ *
+ * Author: Byungho An <bh74.an@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __SXGBE_XPCS_H__
+#define __SXGBE_XPCS_H__
+
+/* XPCS Registers */
+#define XPCS_OFFSET			0x1A060000
+#define SR_PCS_MMD_CONTROL1		0x030000
+#define SR_PCS_CONTROL2			0x030007
+#define VR_PCS_MMD_XAUI_MODE_CONTROL	0x038004
+#define VR_PCS_MMD_DIGITAL_STATUS	0x038010
+#define SR_MII_MMD_CONTROL		0x1F0000
+#define SR_MII_MMD_AN_ADV		0x1F0004
+#define SR_MII_MMD_AN_LINK_PARTNER_BA	0x1F0005
+#define VR_MII_MMD_AN_CONTROL		0x1F8001
+#define VR_MII_MMD_AN_INT_STATUS	0x1F8002
+
+#define XPCS_QSEQ_STATE_STABLE		0x10
+#define XPCS_QSEQ_STATE_MPLLOFF		0x1c
+#define XPCS_TYPE_SEL_R			0x00
+#define XPCS_TYPE_SEL_X			0x01
+#define XPCS_TYPE_SEL_W			0x02
+#define XPCS_XAUI_MODE			0x00
+#define XPCS_RXAUI_MODE			0x01
+
+int sxgbe_xpcs_init(struct net_device *ndev);
+int sxgbe_xpcs_init_1G(struct net_device *ndev);
+
+#endif /* __SXGBE_XPCS_H__ */
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 174a92f5fe51..21c20ea0dad0 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -162,8 +162,8 @@ static int efx_ef10_get_mac_address(struct efx_nic *efx, u8 *mac_address)
 	if (outlen < MC_CMD_GET_MAC_ADDRESSES_OUT_LEN)
 		return -EIO;
 
-	memcpy(mac_address,
-	       MCDI_PTR(outbuf, GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE), ETH_ALEN);
+	ether_addr_copy(mac_address,
+			MCDI_PTR(outbuf, GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE));
 
 	return 0;
 }
@@ -172,8 +172,8 @@ static int efx_ef10_probe(struct efx_nic *efx)
 	struct efx_ef10_nic_data *nic_data;
 	int i, rc;
 
-	/* We can have one VI for each 8K region.  However we need
-	 * multiple TX queues per channel.
+	/* We can have one VI for each 8K region.  However, until we
+	 * use TX option descriptors we need two TX queues per channel.
 	 */
 	efx->max_channels =
 		min_t(unsigned int,
@@ -565,10 +565,17 @@ static int efx_ef10_dimension_resources(struct efx_nic *efx)
 	 * several of each (in fact that's the only option if host
 	 * page size is >4K).  So we may allocate some extra VIs just
 	 * for writing PIO buffers through.
+	 *
+	 * The UC mapping contains (min_vis - 1) complete VIs and the
+	 * first half of the next VI.  Then the WC mapping begins with
+	 * the second half of this last VI.
 	 */
 	uc_mem_map_size = PAGE_ALIGN((min_vis - 1) * EFX_VI_PAGE_SIZE +
 				     ER_DZ_TX_PIOBUF);
 	if (nic_data->n_piobufs) {
+		/* pio_write_vi_base rounds down to give the number of complete
+		 * VIs inside the UC mapping.
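+		 *
+		 * A worked example with assumed numbers: if min_vis = 10,
+		 * EFX_VI_PAGE_SIZE = 8192 and ER_DZ_TX_PIOBUF = 4096, then
+		 * uc_mem_map_size = PAGE_ALIGN(9 * 8192 + 4096) = 77824 on a
+		 * 4K-page host, and pio_write_vi_base below comes out as
+		 * 77824 / 8192 = 9, i.e. the nine complete VIs inside the
+		 * UC mapping.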
+ */ pio_write_vi_base = uc_mem_map_size / EFX_VI_PAGE_SIZE; wc_mem_map_size = (PAGE_ALIGN((pio_write_vi_base + nic_data->n_piobufs) * @@ -1955,6 +1962,9 @@ static int efx_ef10_ev_process(struct efx_channel *channel, int quota) int tx_descs = 0; int spent = 0; + if (quota <= 0) + return spent; + read_ptr = channel->eventq_read_ptr; for (;;) { @@ -3145,12 +3155,10 @@ static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx) table->dev_uc_count = -1; } else { table->dev_uc_count = 1 + netdev_uc_count(net_dev); - memcpy(table->dev_uc_list[0].addr, net_dev->dev_addr, - ETH_ALEN); + ether_addr_copy(table->dev_uc_list[0].addr, net_dev->dev_addr); i = 1; netdev_for_each_uc_addr(uc, net_dev) { - memcpy(table->dev_uc_list[i].addr, - uc->addr, ETH_ALEN); + ether_addr_copy(table->dev_uc_list[i].addr, uc->addr); i++; } } @@ -3162,8 +3170,7 @@ static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx) eth_broadcast_addr(table->dev_mc_list[0].addr); i = 1; netdev_for_each_mc_addr(mc, net_dev) { - memcpy(table->dev_mc_list[i].addr, - mc->addr, ETH_ALEN); + ether_addr_copy(table->dev_mc_list[i].addr, mc->addr); i++; } } diff --git a/drivers/net/ethernet/sfc/ef10_regs.h b/drivers/net/ethernet/sfc/ef10_regs.h index 207ac9a1e3de..62a55dde61d5 100644 --- a/drivers/net/ethernet/sfc/ef10_regs.h +++ b/drivers/net/ethernet/sfc/ef10_regs.h @@ -227,36 +227,6 @@ #define ESF_DZ_RX_KER_BUF_ADDR_LBN 0 #define ESF_DZ_RX_KER_BUF_ADDR_WIDTH 48 -/* RX_USER_DESC */ -#define ESF_DZ_RX_USR_RESERVED_LBN 62 -#define ESF_DZ_RX_USR_RESERVED_WIDTH 2 -#define ESF_DZ_RX_USR_BYTE_CNT_LBN 48 -#define ESF_DZ_RX_USR_BYTE_CNT_WIDTH 14 -#define ESF_DZ_RX_USR_BUF_PAGE_SIZE_LBN 44 -#define ESF_DZ_RX_USR_BUF_PAGE_SIZE_WIDTH 4 -#define ESE_DZ_USR_BUF_PAGE_SZ_4MB 10 -#define ESE_DZ_USR_BUF_PAGE_SZ_1MB 8 -#define ESE_DZ_USR_BUF_PAGE_SZ_64KB 4 -#define ESE_DZ_USR_BUF_PAGE_SZ_4KB 0 -#define ESF_DZ_RX_USR_BUF_ID_OFFSET_LBN 0 -#define ESF_DZ_RX_USR_BUF_ID_OFFSET_WIDTH 44 -#define ESF_DZ_RX_USR_4KBPS_BUF_ID_LBN 12 -#define ESF_DZ_RX_USR_4KBPS_BUF_ID_WIDTH 32 -#define ESF_DZ_RX_USR_64KBPS_BUF_ID_LBN 16 -#define ESF_DZ_RX_USR_64KBPS_BUF_ID_WIDTH 28 -#define ESF_DZ_RX_USR_1MBPS_BUF_ID_LBN 20 -#define ESF_DZ_RX_USR_1MBPS_BUF_ID_WIDTH 24 -#define ESF_DZ_RX_USR_4MBPS_BUF_ID_LBN 22 -#define ESF_DZ_RX_USR_4MBPS_BUF_ID_WIDTH 22 -#define ESF_DZ_RX_USR_4MBPS_BYTE_OFFSET_LBN 0 -#define ESF_DZ_RX_USR_4MBPS_BYTE_OFFSET_WIDTH 22 -#define ESF_DZ_RX_USR_1MBPS_BYTE_OFFSET_LBN 0 -#define ESF_DZ_RX_USR_1MBPS_BYTE_OFFSET_WIDTH 20 -#define ESF_DZ_RX_USR_64KBPS_BYTE_OFFSET_LBN 0 -#define ESF_DZ_RX_USR_64KBPS_BYTE_OFFSET_WIDTH 16 -#define ESF_DZ_RX_USR_4KBPS_BYTE_OFFSET_LBN 0 -#define ESF_DZ_RX_USR_4KBPS_BYTE_OFFSET_WIDTH 12 - /* TX_CSUM_TSTAMP_DESC */ #define ESF_DZ_TX_DESC_IS_OPT_LBN 63 #define ESF_DZ_TX_DESC_IS_OPT_WIDTH 1 @@ -338,37 +308,6 @@ #define ESF_DZ_TX_TSO_TCP_SEQNO_LBN 0 #define ESF_DZ_TX_TSO_TCP_SEQNO_WIDTH 32 -/* TX_USER_DESC */ -#define ESF_DZ_TX_USR_TYPE_LBN 63 -#define ESF_DZ_TX_USR_TYPE_WIDTH 1 -#define ESF_DZ_TX_USR_CONT_LBN 62 -#define ESF_DZ_TX_USR_CONT_WIDTH 1 -#define ESF_DZ_TX_USR_BYTE_CNT_LBN 48 -#define ESF_DZ_TX_USR_BYTE_CNT_WIDTH 14 -#define ESF_DZ_TX_USR_BUF_PAGE_SIZE_LBN 44 -#define ESF_DZ_TX_USR_BUF_PAGE_SIZE_WIDTH 4 -#define ESE_DZ_USR_BUF_PAGE_SZ_4MB 10 -#define ESE_DZ_USR_BUF_PAGE_SZ_1MB 8 -#define ESE_DZ_USR_BUF_PAGE_SZ_64KB 4 -#define ESE_DZ_USR_BUF_PAGE_SZ_4KB 0 -#define ESF_DZ_TX_USR_BUF_ID_OFFSET_LBN 0 -#define ESF_DZ_TX_USR_BUF_ID_OFFSET_WIDTH 44 -#define ESF_DZ_TX_USR_4KBPS_BUF_ID_LBN 12 -#define 
ESF_DZ_TX_USR_4KBPS_BUF_ID_WIDTH 32 -#define ESF_DZ_TX_USR_64KBPS_BUF_ID_LBN 16 -#define ESF_DZ_TX_USR_64KBPS_BUF_ID_WIDTH 28 -#define ESF_DZ_TX_USR_1MBPS_BUF_ID_LBN 20 -#define ESF_DZ_TX_USR_1MBPS_BUF_ID_WIDTH 24 -#define ESF_DZ_TX_USR_4MBPS_BUF_ID_LBN 22 -#define ESF_DZ_TX_USR_4MBPS_BUF_ID_WIDTH 22 -#define ESF_DZ_TX_USR_4MBPS_BYTE_OFFSET_LBN 0 -#define ESF_DZ_TX_USR_4MBPS_BYTE_OFFSET_WIDTH 22 -#define ESF_DZ_TX_USR_1MBPS_BYTE_OFFSET_LBN 0 -#define ESF_DZ_TX_USR_1MBPS_BYTE_OFFSET_WIDTH 20 -#define ESF_DZ_TX_USR_64KBPS_BYTE_OFFSET_LBN 0 -#define ESF_DZ_TX_USR_64KBPS_BYTE_OFFSET_WIDTH 16 -#define ESF_DZ_TX_USR_4KBPS_BYTE_OFFSET_LBN 0 -#define ESF_DZ_TX_USR_4KBPS_BYTE_OFFSET_WIDTH 12 /*************************************************************************/ /* TX_DESC_UPD_REG: Transmit descriptor update register. diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 83d464347021..57b971e5e6b2 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -503,8 +503,6 @@ static int efx_probe_channel(struct efx_channel *channel) goto fail; } - channel->n_rx_frm_trunc = 0; - return 0; fail: @@ -1014,7 +1012,7 @@ static int efx_probe_port(struct efx_nic *efx) return rc; /* Initialise MAC address to permanent address */ - memcpy(efx->net_dev->dev_addr, efx->net_dev->perm_addr, ETH_ALEN); + ether_addr_copy(efx->net_dev->dev_addr, efx->net_dev->perm_addr); return 0; } @@ -1346,20 +1344,23 @@ static int efx_probe_interrupts(struct efx_nic *efx) for (i = 0; i < n_channels; i++) xentries[i].entry = i; - rc = pci_enable_msix(efx->pci_dev, xentries, n_channels); - if (rc > 0) { + rc = pci_enable_msix_range(efx->pci_dev, + xentries, 1, n_channels); + if (rc < 0) { + /* Fall back to single channel MSI */ + efx->interrupt_mode = EFX_INT_MODE_MSI; + netif_err(efx, drv, efx->net_dev, + "could not enable MSI-X\n"); + } else if (rc < n_channels) { netif_err(efx, drv, efx->net_dev, "WARNING: Insufficient MSI-X vectors" " available (%d < %u).\n", rc, n_channels); netif_err(efx, drv, efx->net_dev, "WARNING: Performance may be reduced.\n"); - EFX_BUG_ON_PARANOID(rc >= n_channels); n_channels = rc; - rc = pci_enable_msix(efx->pci_dev, xentries, - n_channels); } - if (rc == 0) { + if (rc > 0) { efx->n_channels = n_channels; if (n_channels > extra_channels) n_channels -= extra_channels; @@ -1375,11 +1376,6 @@ static int efx_probe_interrupts(struct efx_nic *efx) for (i = 0; i < efx->n_channels; i++) efx_get_channel(efx, i)->irq = xentries[i].vector; - } else { - /* Fall back to single channel MSI */ - efx->interrupt_mode = EFX_INT_MODE_MSI; - netif_err(efx, drv, efx->net_dev, - "could not enable MSI-X\n"); } } @@ -1603,6 +1599,8 @@ static int efx_probe_nic(struct efx_nic *efx) if (rc) goto fail1; + efx_set_channels(efx); + rc = efx->type->dimension_resources(efx); if (rc) goto fail2; @@ -1613,7 +1611,6 @@ static int efx_probe_nic(struct efx_nic *efx) efx->rx_indir_table[i] = ethtool_rxfh_indir_default(i, efx->rss_spread); - efx_set_channels(efx); netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels); netif_set_real_num_rx_queues(efx->net_dev, efx->n_rx_channels); @@ -2115,7 +2112,7 @@ static int efx_set_mac_address(struct net_device *net_dev, void *data) { struct efx_nic *efx = netdev_priv(net_dev); struct sockaddr *addr = data; - char *new_addr = addr->sa_data; + u8 *new_addr = addr->sa_data; if (!is_valid_ether_addr(new_addr)) { netif_err(efx, drv, efx->net_dev, @@ -2124,7 +2121,7 @@ static int efx_set_mac_address(struct net_device *net_dev, void *data) 
return -EADDRNOTAVAIL; } - memcpy(net_dev->dev_addr, new_addr, net_dev->addr_len); + ether_addr_copy(net_dev->dev_addr, new_addr); efx_sriov_mac_address_changed(efx); /* Reconfigure the MAC */ @@ -3273,6 +3270,6 @@ module_exit(efx_exit_module); MODULE_AUTHOR("Solarflare Communications and " "Michael Brown <mbrown@fensystems.co.uk>"); -MODULE_DESCRIPTION("Solarflare Communications network driver"); +MODULE_DESCRIPTION("Solarflare network driver"); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, efx_pci_table); diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h index dbd7b78fe01c..99032581336f 100644 --- a/drivers/net/ethernet/sfc/efx.h +++ b/drivers/net/ethernet/sfc/efx.h @@ -14,7 +14,7 @@ #include "net_driver.h" #include "filter.h" -/* Solarstorm controllers use BAR 0 for I/O space and BAR 2(&3) for memory */ +/* All controllers use BAR 0 for I/O space and BAR 2(&3) for memory */ #define EFX_MEM_BAR 2 /* TX */ diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c index 229428915aa8..0de8b07c24c2 100644 --- a/drivers/net/ethernet/sfc/ethtool.c +++ b/drivers/net/ethernet/sfc/ethtool.c @@ -251,6 +251,9 @@ static void efx_fill_test(unsigned int test_index, u8 *strings, u64 *data, * @test_index: Starting index of the test * @strings: Ethtool strings, or %NULL * @data: Ethtool test results, or %NULL + * + * Fill in a block of loopback self-test entries. Return new test + * index. */ static int efx_fill_loopback_test(struct efx_nic *efx, struct efx_loopback_self_tests *lb_tests, @@ -290,6 +293,12 @@ static int efx_fill_loopback_test(struct efx_nic *efx, * @tests: Efx self-test results structure, or %NULL * @strings: Ethtool strings, or %NULL * @data: Ethtool test results, or %NULL + * + * Get self-test number of strings, strings, and/or test results. + * Return number of strings (== number of test results). + * + * The reason for merging these three functions is to make sure that + * they can never be inconsistent. */ static int efx_ethtool_fill_self_tests(struct efx_nic *efx, struct efx_self_tests *tests, @@ -444,7 +453,7 @@ static void efx_ethtool_self_test(struct net_device *net_dev, { struct efx_nic *efx = netdev_priv(net_dev); struct efx_self_tests *efx_tests; - int already_up; + bool already_up; int rc = -ENOMEM; efx_tests = kzalloc(sizeof(*efx_tests), GFP_KERNEL); @@ -452,8 +461,8 @@ static void efx_ethtool_self_test(struct net_device *net_dev, goto fail; if (efx->state != STATE_READY) { - rc = -EIO; - goto fail1; + rc = -EBUSY; + goto out; } netif_info(efx, drv, efx->net_dev, "starting %sline testing\n", @@ -466,7 +475,7 @@ static void efx_ethtool_self_test(struct net_device *net_dev, if (rc) { netif_err(efx, drv, efx->net_dev, "failed opening device.\n"); - goto fail1; + goto out; } } @@ -479,8 +488,7 @@ static void efx_ethtool_self_test(struct net_device *net_dev, rc == 0 ? "passed" : "failed", (test->flags & ETH_TEST_FL_OFFLINE) ? 
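/* The memcpy()-to-ether_addr_copy() conversions in this file rely on
 * ether_addr_copy()'s requirement that both buffers be at least 2-byte
 * aligned (it may copy with 16/32-bit loads), which is why the
 * mac_addr_ig_mask below gains __aligned(2).  A sketch of the pattern,
 * with an illustrative buffer name:
 *
 *	static const u8 example_mac[ETH_ALEN] __aligned(2) = {
 *		0x00, 0x0f, 0x53, 0x00, 0x00, 0x01
 *	};
 *	u8 dst[ETH_ALEN] __aligned(2);
 *
 *	ether_addr_copy(dst, example_mac);  replaces memcpy(dst, src, ETH_ALEN)
 */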
"off" : "on"); -fail1: - /* Fill ethtool results structures */ +out: efx_ethtool_fill_self_tests(efx, efx_tests, NULL, data); kfree(efx_tests); fail: @@ -691,7 +699,6 @@ static void efx_ethtool_get_pauseparam(struct net_device *net_dev, pause->autoneg = !!(efx->wanted_fc & EFX_FC_AUTO); } - static void efx_ethtool_get_wol(struct net_device *net_dev, struct ethtool_wolinfo *wol) { @@ -720,7 +727,7 @@ static int efx_ethtool_reset(struct net_device *net_dev, u32 *flags) } /* MAC address mask including only I/G bit */ -static const u8 mac_addr_ig_mask[ETH_ALEN] = { 0x01, 0, 0, 0, 0, 0 }; +static const u8 mac_addr_ig_mask[ETH_ALEN] __aligned(2) = {0x01, 0, 0, 0, 0, 0}; #define IP4_ADDR_FULL_MASK ((__force __be32)~0) #define PORT_FULL_MASK ((__force __be16)~0) @@ -780,16 +787,16 @@ static int efx_ethtool_get_class_rule(struct efx_nic *efx, rule->flow_type = ETHER_FLOW; if (spec.match_flags & (EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG)) { - memcpy(mac_entry->h_dest, spec.loc_mac, ETH_ALEN); + ether_addr_copy(mac_entry->h_dest, spec.loc_mac); if (spec.match_flags & EFX_FILTER_MATCH_LOC_MAC) - memset(mac_mask->h_dest, ~0, ETH_ALEN); + eth_broadcast_addr(mac_mask->h_dest); else - memcpy(mac_mask->h_dest, mac_addr_ig_mask, - ETH_ALEN); + ether_addr_copy(mac_mask->h_dest, + mac_addr_ig_mask); } if (spec.match_flags & EFX_FILTER_MATCH_REM_MAC) { - memcpy(mac_entry->h_source, spec.rem_mac, ETH_ALEN); - memset(mac_mask->h_source, ~0, ETH_ALEN); + ether_addr_copy(mac_entry->h_source, spec.rem_mac); + eth_broadcast_addr(mac_mask->h_source); } if (spec.match_flags & EFX_FILTER_MATCH_ETHER_TYPE) { mac_entry->h_proto = spec.ether_type; @@ -961,13 +968,13 @@ static int efx_ethtool_set_class_rule(struct efx_nic *efx, spec.match_flags |= EFX_FILTER_MATCH_LOC_MAC; else return -EINVAL; - memcpy(spec.loc_mac, mac_entry->h_dest, ETH_ALEN); + ether_addr_copy(spec.loc_mac, mac_entry->h_dest); } if (!is_zero_ether_addr(mac_mask->h_source)) { if (!is_broadcast_ether_addr(mac_mask->h_source)) return -EINVAL; spec.match_flags |= EFX_FILTER_MATCH_REM_MAC; - memcpy(spec.rem_mac, mac_entry->h_source, ETH_ALEN); + ether_addr_copy(spec.rem_mac, mac_entry->h_source); } if (mac_mask->h_proto) { if (mac_mask->h_proto != ETHER_TYPE_FULL_MASK) diff --git a/drivers/net/ethernet/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon.c index 18d6f761f4d0..8ec20b713cc6 100644 --- a/drivers/net/ethernet/sfc/falcon.c +++ b/drivers/net/ethernet/sfc/falcon.c @@ -422,7 +422,6 @@ static inline void falcon_irq_ack_a1(struct efx_nic *efx) efx_readd(efx, ®, FR_AA_WORK_AROUND_BROKEN_PCI_READS); } - static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id) { struct efx_nic *efx = dev_id; @@ -467,6 +466,7 @@ static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id) efx_schedule_channel_irq(efx_get_channel(efx, 1)); return IRQ_HANDLED; } + /************************************************************************** * * RSS @@ -1358,6 +1358,7 @@ static void falcon_reconfigure_mac_wrapper(struct efx_nic *efx) case 100: link_speed = 1; break; default: link_speed = 0; break; } + /* MAC_LINK_STATUS controls MAC backpressure but doesn't work * as advertised. 
Disable to ensure packets are not * indefinitely held and TX queue can be flushed at any point @@ -2182,7 +2183,7 @@ static int falcon_probe_nvconfig(struct efx_nic *efx) } /* Read the MAC addresses */ - memcpy(efx->net_dev->perm_addr, nvconfig->mac_address[0], ETH_ALEN); + ether_addr_copy(efx->net_dev->perm_addr, nvconfig->mac_address[0]); netif_dbg(efx, probe, efx->net_dev, "PHY is %d phy_id %d\n", efx->phy_type, efx->mdio.prtad); @@ -2868,4 +2869,3 @@ const struct efx_nic_type falcon_b0_nic_type = { .mcdi_max_ver = -1, .max_rx_ip_filters = FR_BZ_RX_FILTER_TBL0_ROWS, }; - diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c index f72489a105ca..a08761360cdf 100644 --- a/drivers/net/ethernet/sfc/farch.c +++ b/drivers/net/ethernet/sfc/farch.c @@ -311,7 +311,6 @@ static inline void efx_farch_push_tx_desc(struct efx_tx_queue *tx_queue, */ void efx_farch_tx_write(struct efx_tx_queue *tx_queue) { - struct efx_tx_buffer *buffer; efx_qword_t *txd; unsigned write_ptr; @@ -1249,6 +1248,9 @@ int efx_farch_ev_process(struct efx_channel *channel, int budget) int tx_packets = 0; int spent = 0; + if (budget <= 0) + return spent; + read_ptr = channel->eventq_read_ptr; for (;;) { @@ -1609,7 +1611,6 @@ irqreturn_t efx_farch_msi_interrupt(int irq, void *dev_id) return IRQ_HANDLED; } - /* Setup RSS indirection table. * This maps from the hash value of the packet to RXQ */ diff --git a/drivers/net/ethernet/sfc/filter.h b/drivers/net/ethernet/sfc/filter.h index 3ef298d3c47e..d0ed7f71ea7e 100644 --- a/drivers/net/ethernet/sfc/filter.h +++ b/drivers/net/ethernet/sfc/filter.h @@ -243,7 +243,7 @@ static inline int efx_filter_set_eth_local(struct efx_filter_spec *spec, } if (addr != NULL) { spec->match_flags |= EFX_FILTER_MATCH_LOC_MAC; - memcpy(spec->loc_mac, addr, ETH_ALEN); + ether_addr_copy(spec->loc_mac, addr); } return 0; } diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c index eb59abb57e85..7bd4b14bf3b3 100644 --- a/drivers/net/ethernet/sfc/mcdi.c +++ b/drivers/net/ethernet/sfc/mcdi.c @@ -1187,6 +1187,9 @@ int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address, int rc; BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_IN_LEN != 0); + /* we need __aligned(2) for ether_addr_copy */ + BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_OFST & 1); + BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_OFST & 1); rc = efx_mcdi_rpc(efx, MC_CMD_GET_BOARD_CFG, NULL, 0, outbuf, sizeof(outbuf), &outlen); @@ -1199,11 +1202,10 @@ int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address, } if (mac_address) - memcpy(mac_address, - port_num ? - MCDI_PTR(outbuf, GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1) : - MCDI_PTR(outbuf, GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0), - ETH_ALEN); + ether_addr_copy(mac_address, + port_num ? 
+ MCDI_PTR(outbuf, GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1) : + MCDI_PTR(outbuf, GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0)); if (fw_subtype_list) { for (i = 0; i < MCDI_VAR_ARRAY_LEN(outlen, @@ -1532,7 +1534,7 @@ static int efx_mcdi_wol_filter_set(struct efx_nic *efx, u32 type, MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_WOL_TYPE, type); MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_FILTER_MODE, MC_CMD_FILTER_MODE_SIMPLE); - memcpy(MCDI_PTR(inbuf, WOL_FILTER_SET_IN_MAGIC_MAC), mac, ETH_ALEN); + ether_addr_copy(MCDI_PTR(inbuf, WOL_FILTER_SET_IN_MAGIC_MAC), mac); rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_SET, inbuf, sizeof(inbuf), outbuf, sizeof(outbuf), &outlen); diff --git a/drivers/net/ethernet/sfc/mcdi_port.c b/drivers/net/ethernet/sfc/mcdi_port.c index 91d23252f8fa..e5fc4e1574b5 100644 --- a/drivers/net/ethernet/sfc/mcdi_port.c +++ b/drivers/net/ethernet/sfc/mcdi_port.c @@ -854,8 +854,8 @@ int efx_mcdi_set_mac(struct efx_nic *efx) BUILD_BUG_ON(MC_CMD_SET_MAC_OUT_LEN != 0); - memcpy(MCDI_PTR(cmdbytes, SET_MAC_IN_ADDR), - efx->net_dev->dev_addr, ETH_ALEN); + ether_addr_copy(MCDI_PTR(cmdbytes, SET_MAC_IN_ADDR), + efx->net_dev->dev_addr); MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_MTU, EFX_MAX_FRAME_LEN(efx->net_dev->mtu)); diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index af2b8c59a903..8a400a0595eb 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -1323,7 +1323,6 @@ static inline struct efx_rx_buffer *efx_rx_buffer(struct efx_rx_queue *rx_queue, return &rx_queue->buffer[index]; } - /** * EFX_MAX_FRAME_LEN - calculate maximum frame length * diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c index 79226b19e3c4..32d969e857f7 100644 --- a/drivers/net/ethernet/sfc/nic.c +++ b/drivers/net/ethernet/sfc/nic.c @@ -530,4 +530,3 @@ void efx_nic_fix_nodesc_drop_stat(struct efx_nic *efx, u64 *rx_nodesc_drops) efx->rx_nodesc_drops_prev_state = !!(efx->net_dev->flags & IFF_UP); *rx_nodesc_drops -= efx->rx_nodesc_drops_while_down; } - diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c index d7a36829649a..6b861e3de4b0 100644 --- a/drivers/net/ethernet/sfc/ptp.c +++ b/drivers/net/ethernet/sfc/ptp.c @@ -223,7 +223,6 @@ struct efx_ptp_timeset { * @evt_list: List of MC receive events awaiting packets * @evt_free_list: List of free events * @evt_lock: Lock for manipulating evt_list and evt_free_list - * @evt_overflow: Boolean indicating that event list has overflowed * @rx_evts: Instantiated events (on evt_list and evt_free_list) * @workwq: Work queue for processing pending PTP operations * @work: Work task @@ -275,7 +274,6 @@ struct efx_ptp_data { struct list_head evt_list; struct list_head evt_free_list; spinlock_t evt_lock; - bool evt_overflow; struct efx_ptp_event_rx rx_evts[MAX_RECEIVE_EVENTS]; struct workqueue_struct *workwq; struct work_struct work; @@ -768,37 +766,36 @@ efx_ptp_process_times(struct efx_nic *efx, MCDI_DECLARE_STRUCT_PTR(synch_buf), return -EAGAIN; } - /* Convert the NIC time into kernel time. No correction is required- - * this time is the output of a firmware process. 
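/* A worked example for the rewritten delta computation above, with
 * assumed values: the code has already checked that last_sec equals
 * start_sec or (start_sec + 1) & MC_SECOND_MASK.  If start_sec = 5 and
 * last_sec = 6, delta.tv_sec = (6 - 5) & 1 = 1.  Across a wrap of the
 * seconds field (start_sec = MC_SECOND_MASK, last_sec = 0) the
 * difference is 1 - 2^k for an all-ones k-bit mask, which is odd, so
 * "& 1" still yields the one-second carry.  tv_nsec may end up negative
 * or above NSEC_PER_SEC; as the new comment notes, delta is
 * deliberately left unnormalised, and pps_sub_ts() normalises later.
 */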
- */ - mc_time = ptp->nic_to_kernel_time(ptp->timeset[last_good].major, - ptp->timeset[last_good].minor, 0); - - /* Calculate delay from actual PPS to last_time */ - delta = ktime_to_timespec(mc_time); - delta.tv_nsec += - last_time->ts_real.tv_nsec - - (ptp->timeset[last_good].host_start & MC_NANOSECOND_MASK); - - /* It is possible that the seconds rolled over between taking + /* Calculate delay from last good sync (host time) to last_time. + * It is possible that the seconds rolled over between taking * the start reading and the last value written by the host. The * timescales are such that a gap of more than one second is never - * expected. + * expected. delta is *not* normalised. */ start_sec = ptp->timeset[last_good].host_start >> MC_NANOSECOND_BITS; last_sec = last_time->ts_real.tv_sec & MC_SECOND_MASK; - if (start_sec != last_sec) { - if (((start_sec + 1) & MC_SECOND_MASK) != last_sec) { - netif_warn(efx, hw, efx->net_dev, - "PTP bad synchronisation seconds\n"); - return -EAGAIN; - } else { - delta.tv_sec = 1; - } - } else { - delta.tv_sec = 0; + if (start_sec != last_sec && + ((start_sec + 1) & MC_SECOND_MASK) != last_sec) { + netif_warn(efx, hw, efx->net_dev, + "PTP bad synchronisation seconds\n"); + return -EAGAIN; } + delta.tv_sec = (last_sec - start_sec) & 1; + delta.tv_nsec = + last_time->ts_real.tv_nsec - + (ptp->timeset[last_good].host_start & MC_NANOSECOND_MASK); + + /* Convert the NIC time at last good sync into kernel time. + * No correction is required - this time is the output of a + * firmware process. + */ + mc_time = ptp->nic_to_kernel_time(ptp->timeset[last_good].major, + ptp->timeset[last_good].minor, 0); + + /* Calculate delay from NIC top of second to last_time */ + delta.tv_nsec += ktime_to_timespec(mc_time).tv_nsec; + /* Set PPS timestamp to match NIC top of second */ ptp->host_time_pps = *last_time; pps_sub_ts(&ptp->host_time_pps, delta); @@ -941,11 +938,6 @@ static void efx_ptp_drop_time_expired_events(struct efx_nic *efx) } } } - /* If the event overflow flag is set and the event list is now empty - * clear the flag to re-enable the overflow warning message. - */ - if (ptp->evt_overflow && list_empty(&ptp->evt_list)) - ptp->evt_overflow = false; spin_unlock_bh(&ptp->evt_lock); } @@ -989,11 +981,6 @@ static enum ptp_packet_state efx_ptp_match_rx(struct efx_nic *efx, break; } } - /* If the event overflow flag is set and the event list is now empty - * clear the flag to re-enable the overflow warning message. 
- */ - if (ptp->evt_overflow && list_empty(&ptp->evt_list)) - ptp->evt_overflow = false; spin_unlock_bh(&ptp->evt_lock); return rc; @@ -1147,7 +1134,6 @@ static int efx_ptp_stop(struct efx_nic *efx) list_for_each_safe(cursor, next, &efx->ptp_data->evt_list) { list_move(cursor, &efx->ptp_data->evt_free_list); } - ptp->evt_overflow = false; spin_unlock_bh(&efx->ptp_data->evt_lock); return rc; @@ -1208,6 +1194,7 @@ static const struct ptp_clock_info efx_phc_clock_info = { .n_alarm = 0, .n_ext_ts = 0, .n_per_out = 0, + .n_pins = 0, .pps = 1, .adjfreq = efx_phc_adjfreq, .adjtime = efx_phc_adjtime, @@ -1253,7 +1240,6 @@ int efx_ptp_probe(struct efx_nic *efx, struct efx_channel *channel) spin_lock_init(&ptp->evt_lock); for (pos = 0; pos < MAX_RECEIVE_EVENTS; pos++) list_add(&ptp->rx_evts[pos].link, &ptp->evt_free_list); - ptp->evt_overflow = false; /* Get the NIC PTP attributes and set up time conversions */ rc = efx_ptp_get_attributes(efx); @@ -1380,6 +1366,7 @@ static bool efx_ptp_rx(struct efx_channel *channel, struct sk_buff *skb) struct efx_ptp_match *match = (struct efx_ptp_match *)skb->cb; u8 *match_data_012, *match_data_345; unsigned int version; + u8 *data; match->expiry = jiffies + msecs_to_jiffies(PKT_EVENT_LIFETIME_MS); @@ -1388,7 +1375,8 @@ static bool efx_ptp_rx(struct efx_channel *channel, struct sk_buff *skb) if (!pskb_may_pull(skb, PTP_V1_MIN_LENGTH)) { return false; } - version = ntohs(*(__be16 *)&skb->data[PTP_V1_VERSION_OFFSET]); + data = skb->data; + version = ntohs(*(__be16 *)&data[PTP_V1_VERSION_OFFSET]); if (version != PTP_VERSION_V1) { return false; } @@ -1396,13 +1384,14 @@ static bool efx_ptp_rx(struct efx_channel *channel, struct sk_buff *skb) /* PTP V1 uses all six bytes of the UUID to match the packet * to the timestamp */ - match_data_012 = skb->data + PTP_V1_UUID_OFFSET; - match_data_345 = skb->data + PTP_V1_UUID_OFFSET + 3; + match_data_012 = data + PTP_V1_UUID_OFFSET; + match_data_345 = data + PTP_V1_UUID_OFFSET + 3; } else { if (!pskb_may_pull(skb, PTP_V2_MIN_LENGTH)) { return false; } - version = skb->data[PTP_V2_VERSION_OFFSET]; + data = skb->data; + version = data[PTP_V2_VERSION_OFFSET]; if ((version & PTP_VERSION_V2_MASK) != PTP_VERSION_V2) { return false; } @@ -1414,17 +1403,17 @@ static bool efx_ptp_rx(struct efx_channel *channel, struct sk_buff *skb) * enhanced mode fixes this issue and uses bytes 0-2 * and byte 5-7 of the UUID. */ - match_data_345 = skb->data + PTP_V2_UUID_OFFSET + 5; + match_data_345 = data + PTP_V2_UUID_OFFSET + 5; if (ptp->mode == MC_CMD_PTP_MODE_V2) { - match_data_012 = skb->data + PTP_V2_UUID_OFFSET + 2; + match_data_012 = data + PTP_V2_UUID_OFFSET + 2; } else { - match_data_012 = skb->data + PTP_V2_UUID_OFFSET + 0; + match_data_012 = data + PTP_V2_UUID_OFFSET + 0; BUG_ON(ptp->mode != MC_CMD_PTP_MODE_V2_ENHANCED); } } /* Does this packet require timestamping? 
*/ - if (ntohs(*(__be16 *)&skb->data[PTP_DPORT_OFFSET]) == PTP_EVENT_PORT) { + if (ntohs(*(__be16 *)&data[PTP_DPORT_OFFSET]) == PTP_EVENT_PORT) { match->state = PTP_PACKET_STATE_UNMATCHED; /* We expect the sequence number to be in the same position in @@ -1440,8 +1429,8 @@ static bool efx_ptp_rx(struct efx_channel *channel, struct sk_buff *skb) (match_data_345[0] << 24)); match->words[1] = (match_data_345[1] | (match_data_345[2] << 8) | - (skb->data[PTP_V1_SEQUENCE_OFFSET + - PTP_V1_SEQUENCE_LENGTH - 1] << + (data[PTP_V1_SEQUENCE_OFFSET + + PTP_V1_SEQUENCE_LENGTH - 1] << 16)); } else { match->state = PTP_PACKET_STATE_MATCH_UNWANTED; @@ -1635,13 +1624,9 @@ static void ptp_event_rx(struct efx_nic *efx, struct efx_ptp_data *ptp) list_add_tail(&evt->link, &ptp->evt_list); queue_work(ptp->workwq, &ptp->work); - } else if (!ptp->evt_overflow) { - /* Log a warning message and set the event overflow flag. - * The message won't be logged again until the event queue - * becomes empty. - */ + } else if (net_ratelimit()) { + /* Log a rate-limited warning message. */ netif_err(efx, rx_err, efx->net_dev, "PTP event queue overflow\n"); - ptp->evt_overflow = true; } spin_unlock_bh(&ptp->evt_lock); } diff --git a/drivers/net/ethernet/sfc/selftest.c b/drivers/net/ethernet/sfc/selftest.c index 26641817a9c7..0fc5baef45b1 100644 --- a/drivers/net/ethernet/sfc/selftest.c +++ b/drivers/net/ethernet/sfc/selftest.c @@ -50,7 +50,7 @@ struct efx_loopback_payload { } __packed; /* Loopback test source MAC address */ -static const unsigned char payload_source[ETH_ALEN] = { +static const u8 payload_source[ETH_ALEN] __aligned(2) = { 0x00, 0x0f, 0x53, 0x1b, 0x1b, 0x1b, }; @@ -366,8 +366,8 @@ static void efx_iterate_state(struct efx_nic *efx) struct efx_loopback_payload *payload = &state->payload; /* Initialise the layerII header */ - memcpy(&payload->header.h_dest, net_dev->dev_addr, ETH_ALEN); - memcpy(&payload->header.h_source, &payload_source, ETH_ALEN); + ether_addr_copy((u8 *)&payload->header.h_dest, net_dev->dev_addr); + ether_addr_copy((u8 *)&payload->header.h_source, payload_source); payload->header.h_proto = htons(ETH_P_IP); /* saddr set later and used as incrementing count */ diff --git a/drivers/net/ethernet/sfc/siena_sriov.c b/drivers/net/ethernet/sfc/siena_sriov.c index 0c38f926871e..9a9205e77896 100644 --- a/drivers/net/ethernet/sfc/siena_sriov.c +++ b/drivers/net/ethernet/sfc/siena_sriov.c @@ -1095,7 +1095,7 @@ static void efx_sriov_peer_work(struct work_struct *data) /* Fill the remaining addresses */ list_for_each_entry(local_addr, &efx->local_addr_list, link) { - memcpy(peer->mac_addr, local_addr->addr, ETH_ALEN); + ether_addr_copy(peer->mac_addr, local_addr->addr); peer->tci = 0; ++peer; ++peer_count; @@ -1303,8 +1303,7 @@ int efx_sriov_init(struct efx_nic *efx) goto fail_vfs; rtnl_lock(); - memcpy(vfdi_status->peers[0].mac_addr, - net_dev->dev_addr, ETH_ALEN); + ether_addr_copy(vfdi_status->peers[0].mac_addr, net_dev->dev_addr); efx->vf_init_count = efx->vf_count; rtnl_unlock(); @@ -1452,8 +1451,8 @@ void efx_sriov_mac_address_changed(struct efx_nic *efx) if (!efx->vf_init_count) return; - memcpy(vfdi_status->peers[0].mac_addr, - efx->net_dev->dev_addr, ETH_ALEN); + ether_addr_copy(vfdi_status->peers[0].mac_addr, + efx->net_dev->dev_addr); queue_work(vfdi_workqueue, &efx->peer_work); } @@ -1570,7 +1569,7 @@ int efx_sriov_set_vf_mac(struct net_device *net_dev, int vf_i, u8 *mac) vf = efx->vf + vf_i; mutex_lock(&vf->status_lock); - memcpy(vf->addr.mac_addr, mac, ETH_ALEN); + 
ether_addr_copy(vf->addr.mac_addr, mac); __efx_sriov_update_vf_addr(vf); mutex_unlock(&vf->status_lock); @@ -1633,7 +1632,7 @@ int efx_sriov_get_vf_config(struct net_device *net_dev, int vf_i, vf = efx->vf + vf_i; ivi->vf = vf_i; - memcpy(ivi->mac, vf->addr.mac_addr, ETH_ALEN); + ether_addr_copy(ivi->mac, vf->addr.mac_addr); ivi->tx_rate = 0; tci = ntohs(vf->addr.tci); ivi->vlan = tci & VLAN_VID_MASK; diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c index 75d11fa4eb0a..fa9475300411 100644 --- a/drivers/net/ethernet/sfc/tx.c +++ b/drivers/net/ethernet/sfc/tx.c @@ -787,15 +787,6 @@ void efx_remove_tx_queue(struct efx_tx_queue *tx_queue) * Requires TX checksum offload support. */ -/* Number of bytes inserted at the start of a TSO header buffer, - * similar to NET_IP_ALIGN. - */ -#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS -#define TSOH_OFFSET 0 -#else -#define TSOH_OFFSET NET_IP_ALIGN -#endif - #define PTR_DIFF(p1, p2) ((u8 *)(p1) - (u8 *)(p2)) /** @@ -882,13 +873,13 @@ static u8 *efx_tsoh_get_buffer(struct efx_tx_queue *tx_queue, EFX_BUG_ON_PARANOID(buffer->flags); EFX_BUG_ON_PARANOID(buffer->unmap_len); - if (likely(len <= TSOH_STD_SIZE - TSOH_OFFSET)) { + if (likely(len <= TSOH_STD_SIZE - NET_IP_ALIGN)) { unsigned index = (tx_queue->insert_count & tx_queue->ptr_mask) / 2; struct efx_buffer *page_buf = &tx_queue->tsoh_page[index / TSOH_PER_PAGE]; unsigned offset = - TSOH_STD_SIZE * (index % TSOH_PER_PAGE) + TSOH_OFFSET; + TSOH_STD_SIZE * (index % TSOH_PER_PAGE) + NET_IP_ALIGN; if (unlikely(!page_buf->addr) && efx_nic_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE, @@ -901,10 +892,10 @@ static u8 *efx_tsoh_get_buffer(struct efx_tx_queue *tx_queue, } else { tx_queue->tso_long_headers++; - buffer->heap_buf = kmalloc(TSOH_OFFSET + len, GFP_ATOMIC); + buffer->heap_buf = kmalloc(NET_IP_ALIGN + len, GFP_ATOMIC); if (unlikely(!buffer->heap_buf)) return NULL; - result = (u8 *)buffer->heap_buf + TSOH_OFFSET; + result = (u8 *)buffer->heap_buf + NET_IP_ALIGN; buffer->flags = EFX_TX_BUF_CONT | EFX_TX_BUF_HEAP; } @@ -1011,7 +1002,7 @@ static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue) static int tso_start(struct tso_state *st, struct efx_nic *efx, const struct sk_buff *skb) { - bool use_options = efx_nic_rev(efx) >= EFX_REV_HUNT_A0; + bool use_opt_desc = efx_nic_rev(efx) >= EFX_REV_HUNT_A0; struct device *dma_dev = &efx->pci_dev->dev; unsigned int header_len, in_len; dma_addr_t dma_addr; @@ -1037,7 +1028,7 @@ static int tso_start(struct tso_state *st, struct efx_nic *efx, st->out_len = skb->len - header_len; - if (!use_options) { + if (!use_opt_desc) { st->header_unmap_len = 0; if (likely(in_len == 0)) { diff --git a/drivers/net/ethernet/silan/sc92031.c b/drivers/net/ethernet/silan/sc92031.c index 5eb933c97bba..7daa7d433099 100644 --- a/drivers/net/ethernet/silan/sc92031.c +++ b/drivers/net/ethernet/silan/sc92031.c @@ -987,7 +987,7 @@ out_unlock: spin_unlock(&priv->lock); out: - dev_kfree_skb(skb); + dev_consume_skb_any(skb); return NETDEV_TX_OK; } diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c index ff57a46388ee..6072f093e6b4 100644 --- a/drivers/net/ethernet/sis/sis900.c +++ b/drivers/net/ethernet/sis/sis900.c @@ -1614,7 +1614,7 @@ sis900_start_xmit(struct sk_buff *skb, struct net_device *net_dev) skb->data, skb->len, PCI_DMA_TODEVICE); if (unlikely(pci_dma_mapping_error(sis_priv->pci_dev, sis_priv->tx_ring[entry].bufptr))) { - dev_kfree_skb(skb); + dev_kfree_skb_any(skb); sis_priv->tx_skbuff[entry] = NULL; 
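/* This series converts driver TX paths to the *_any() skb-free helpers.
 * The convention, sketched with an illustrative flag:
 *
 *	if (tx_ok)
 *		dev_consume_skb_any(skb);   sent: not a drop, so packet-drop
 *					    monitors stay quiet
 *	else
 *		dev_kfree_skb_any(skb);     dropped (as here, after a DMA
 *					    mapping failure)
 *
 * The *_any() variants are safe in any context; netpoll can invoke the
 * TX path with IRQs disabled, where plain dev_kfree_skb() is not allowed.
 */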
net_dev->stats.tx_dropped++; spin_unlock_irqrestore(&sis_priv->lock, flags); diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c index c50fb08c9905..66b05e62f70a 100644 --- a/drivers/net/ethernet/smsc/smc911x.c +++ b/drivers/net/ethernet/smsc/smc911x.c @@ -551,7 +551,7 @@ static int smc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) dev->stats.tx_errors++; dev->stats.tx_dropped++; spin_unlock_irqrestore(&lp->lock, flags); - dev_kfree_skb(skb); + dev_kfree_skb_any(skb); return NETDEV_TX_OK; } diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c index 839c0e6cca01..d1b4dca53a9d 100644 --- a/drivers/net/ethernet/smsc/smc91x.c +++ b/drivers/net/ethernet/smsc/smc91x.c @@ -621,7 +621,7 @@ static void smc_hardware_send_pkt(unsigned long data) done: if (!THROTTLE_TX_PKTS) netif_wake_queue(dev); - dev_kfree_skb(skb); + dev_consume_skb_any(skb); } /* @@ -657,7 +657,7 @@ static int smc_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) netdev_warn(dev, "Far too big packet error.\n"); dev->stats.tx_errors++; dev->stats.tx_dropped++; - dev_kfree_skb(skb); + dev_kfree_skb_any(skb); return NETDEV_TX_OK; } diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c index 6382b7c416f4..a0fc151da40d 100644 --- a/drivers/net/ethernet/smsc/smsc911x.c +++ b/drivers/net/ethernet/smsc/smsc911x.c @@ -439,7 +439,8 @@ static int smsc911x_request_resources(struct platform_device *pdev) /* Request clock */ pdata->clk = clk_get(&pdev->dev, NULL); if (IS_ERR(pdata->clk)) - netdev_warn(ndev, "couldn't get clock %li\n", PTR_ERR(pdata->clk)); + dev_dbg(&pdev->dev, "couldn't get clock %li\n", + PTR_ERR(pdata->clk)); return ret; } @@ -1672,7 +1673,7 @@ static int smsc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) pdata->ops->tx_writefifo(pdata, (unsigned int *)bufp, wrsz); freespace -= (skb->len + 32); skb_tx_timestamp(skb); - dev_kfree_skb(skb); + dev_consume_skb_any(skb); if (unlikely(smsc911x_tx_get_txstatcount(pdata) >= 30)) smsc911x_tx_update_txcounters(dev); @@ -2379,8 +2380,6 @@ static int smsc911x_drv_probe(struct platform_device *pdev) int res_size, irq_flags; int retval; - pr_info("Driver version %s\n", SMSC_DRV_VERSION); - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smsc911x-memory"); if (!res) diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig index f2d7c702c77f..2d09c116cbc8 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Kconfig +++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig @@ -26,6 +26,16 @@ config STMMAC_PLATFORM If unsure, say N. +config DWMAC_SOCFPGA + bool "SOCFPGA dwmac support" + depends on STMMAC_PLATFORM && MFD_SYSCON && (ARCH_SOCFPGA || COMPILE_TEST) + help + Support for ethernet controller on Altera SOCFPGA + + This selects the Altera SOCFPGA SoC glue layer support + for the stmmac device driver. This driver is used for + arria5 and cyclone5 FPGA SoCs. 
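The glue layer this option enables (dwmac-socfpga.c, added below) matches "altr,socfpga-stmmac" and expects a three-cell "altr,sysmgr-syscon" property: the system-manager regmap phandle plus the register offset and bit shift of the PHYSEL field, with the PHY mode taken from "phy-mode". A minimal illustrative node (the address, offset, and shift are examples, not taken from a real board file):

	gmac0: ethernet@ff700000 {
		compatible = "altr,socfpga-stmmac";
		altr,sysmgr-syscon = <&sysmgr 0x60 0>;
		phy-mode = "rgmii";
	};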
+ config DWMAC_SUNXI bool "Allwinner GMAC support" depends on STMMAC_PLATFORM && ARCH_SUNXI diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile index dcef28775dad..18695ebef7e4 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Makefile +++ b/drivers/net/ethernet/stmicro/stmmac/Makefile @@ -3,6 +3,7 @@ stmmac-$(CONFIG_STMMAC_PLATFORM) += stmmac_platform.o stmmac-$(CONFIG_STMMAC_PCI) += stmmac_pci.o stmmac-$(CONFIG_DWMAC_SUNXI) += dwmac-sunxi.o stmmac-$(CONFIG_DWMAC_STI) += dwmac-sti.o +stmmac-$(CONFIG_DWMAC_SOCFPGA) += dwmac-socfpga.o stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o ring_mode.o \ chain_mode.o dwmac_lib.o dwmac1000_core.o dwmac1000_dma.o \ dwmac100_core.o dwmac100_dma.o enh_desc.o norm_desc.o \ diff --git a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c index 72d282bf33a5..c553f6b5a913 100644 --- a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c +++ b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c @@ -151,7 +151,7 @@ static void stmmac_clean_desc3(void *priv_ptr, struct dma_desc *p) sizeof(struct dma_desc))); } -const struct stmmac_chain_mode_ops chain_mode_ops = { +const struct stmmac_mode_ops chain_mode_ops = { .init = stmmac_init_dma_chain, .is_jumbo_frm = stmmac_is_jumbo_frm, .jumbo_frm = stmmac_jumbo_frm, diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index 7834a3993946..74610f3aca9e 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h @@ -419,20 +419,13 @@ struct mii_regs { unsigned int data; /* MII Data */ }; -struct stmmac_ring_mode_ops { - unsigned int (*is_jumbo_frm) (int len, int ehn_desc); - unsigned int (*jumbo_frm) (void *priv, struct sk_buff *skb, int csum); - void (*refill_desc3) (void *priv, struct dma_desc *p); - void (*init_desc3) (struct dma_desc *p); - void (*clean_desc3) (void *priv, struct dma_desc *p); - int (*set_16kib_bfsize) (int mtu); -}; - -struct stmmac_chain_mode_ops { +struct stmmac_mode_ops { void (*init) (void *des, dma_addr_t phy_addr, unsigned int size, unsigned int extend_desc); unsigned int (*is_jumbo_frm) (int len, int ehn_desc); unsigned int (*jumbo_frm) (void *priv, struct sk_buff *skb, int csum); + int (*set_16kib_bfsize)(int mtu); + void (*init_desc3)(struct dma_desc *p); void (*refill_desc3) (void *priv, struct dma_desc *p); void (*clean_desc3) (void *priv, struct dma_desc *p); }; @@ -441,8 +434,7 @@ struct mac_device_info { const struct stmmac_ops *mac; const struct stmmac_desc_ops *desc; const struct stmmac_dma_ops *dma; - const struct stmmac_ring_mode_ops *ring; - const struct stmmac_chain_mode_ops *chain; + const struct stmmac_mode_ops *mode; const struct stmmac_hwtimestamp *ptp; struct mii_regs mii; /* MII register Addresses */ struct mac_link link; @@ -460,7 +452,7 @@ void stmmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr, void stmmac_set_mac(void __iomem *ioaddr, bool enable); void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr); -extern const struct stmmac_ring_mode_ops ring_mode_ops; -extern const struct stmmac_chain_mode_ops chain_mode_ops; +extern const struct stmmac_mode_ops ring_mode_ops; +extern const struct stmmac_mode_ops chain_mode_ops; #endif /* __COMMON_H__ */ diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c new file mode 100644 index 000000000000..fd8a217556a1 --- /dev/null +++ 
b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c @@ -0,0 +1,130 @@ +/* Copyright Altera Corporation (C) 2014. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + * + * Adopted from dwmac-sti.c + */ + +#include <linux/mfd/syscon.h> +#include <linux/of.h> +#include <linux/of_net.h> +#include <linux/phy.h> +#include <linux/regmap.h> +#include <linux/stmmac.h> + +#define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_GMII_MII 0x0 +#define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RGMII 0x1 +#define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RMII 0x2 +#define SYSMGR_EMACGRP_CTRL_PHYSEL_WIDTH 2 +#define SYSMGR_EMACGRP_CTRL_PHYSEL_MASK 0x00000003 + +struct socfpga_dwmac { + int interface; + u32 reg_offset; + u32 reg_shift; + struct device *dev; + struct regmap *sys_mgr_base_addr; +}; + +static int socfpga_dwmac_parse_data(struct socfpga_dwmac *dwmac, struct device *dev) +{ + struct device_node *np = dev->of_node; + struct regmap *sys_mgr_base_addr; + u32 reg_offset, reg_shift; + int ret; + + dwmac->interface = of_get_phy_mode(np); + + sys_mgr_base_addr = syscon_regmap_lookup_by_phandle(np, "altr,sysmgr-syscon"); + if (IS_ERR(sys_mgr_base_addr)) { + dev_info(dev, "No sysmgr-syscon node found\n"); + return PTR_ERR(sys_mgr_base_addr); + } + + ret = of_property_read_u32_index(np, "altr,sysmgr-syscon", 1, ®_offset); + if (ret) { + dev_info(dev, "Could not read reg_offset from sysmgr-syscon!\n"); + return -EINVAL; + } + + ret = of_property_read_u32_index(np, "altr,sysmgr-syscon", 2, ®_shift); + if (ret) { + dev_info(dev, "Could not read reg_shift from sysmgr-syscon!\n"); + return -EINVAL; + } + + dwmac->reg_offset = reg_offset; + dwmac->reg_shift = reg_shift; + dwmac->sys_mgr_base_addr = sys_mgr_base_addr; + dwmac->dev = dev; + + return 0; +} + +static int socfpga_dwmac_setup(struct socfpga_dwmac *dwmac) +{ + struct regmap *sys_mgr_base_addr = dwmac->sys_mgr_base_addr; + int phymode = dwmac->interface; + u32 reg_offset = dwmac->reg_offset; + u32 reg_shift = dwmac->reg_shift; + u32 ctrl, val; + + switch (phymode) { + case PHY_INTERFACE_MODE_RGMII: + val = SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RGMII; + break; + case PHY_INTERFACE_MODE_MII: + case PHY_INTERFACE_MODE_GMII: + val = SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_GMII_MII; + break; + default: + dev_err(dwmac->dev, "bad phy mode %d\n", phymode); + return -EINVAL; + } + + regmap_read(sys_mgr_base_addr, reg_offset, &ctrl); + ctrl &= ~(SYSMGR_EMACGRP_CTRL_PHYSEL_MASK << reg_shift); + ctrl |= val << reg_shift; + + regmap_write(sys_mgr_base_addr, reg_offset, ctrl); + return 0; +} + +static void *socfpga_dwmac_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + int ret; + struct socfpga_dwmac *dwmac; + + dwmac = devm_kzalloc(dev, sizeof(*dwmac), GFP_KERNEL); + if (!dwmac) + return ERR_PTR(-ENOMEM); + + ret = socfpga_dwmac_parse_data(dwmac, dev); + if (ret) { + dev_err(dev, "Unable to parse OF data\n"); + return ERR_PTR(ret); + } + + ret = socfpga_dwmac_setup(dwmac); + if (ret) { + dev_err(dev, "couldn't setup SoC glue 
(%d)\n", ret); + return ERR_PTR(ret); + } + + return dwmac; +} + +const struct stmmac_of_data socfpga_gmac_data = { + .setup = socfpga_dwmac_probe, +}; diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c index a96c7c2f5f3f..650a4be6bce5 100644 --- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c +++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c @@ -100,10 +100,9 @@ static void stmmac_refill_desc3(void *priv_ptr, struct dma_desc *p) { struct stmmac_priv *priv = (struct stmmac_priv *)priv_ptr; - if (unlikely(priv->plat->has_gmac)) - /* Fill DES3 in case of RING mode */ - if (priv->dma_buf_sz >= BUF_SIZE_8KiB) - p->des3 = p->des2 + BUF_SIZE_8KiB; + /* Fill DES3 in case of RING mode */ + if (priv->dma_buf_sz >= BUF_SIZE_8KiB) + p->des3 = p->des2 + BUF_SIZE_8KiB; } /* In ring mode we need to fill the desc3 because it is used as buffer */ @@ -126,7 +125,7 @@ static int stmmac_set_16kib_bfsize(int mtu) return ret; } -const struct stmmac_ring_mode_ops ring_mode_ops = { +const struct stmmac_mode_ops ring_mode_ops = { .is_jumbo_frm = stmmac_is_jumbo_frm, .jumbo_frm = stmmac_jumbo_frm, .refill_desc3 = stmmac_refill_desc3, diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h index f9e60d7918c4..ca01035634a7 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h @@ -136,6 +136,9 @@ extern const struct stmmac_of_data sun7i_gmac_data; #ifdef CONFIG_DWMAC_STI extern const struct stmmac_of_data sti_gmac_data; #endif +#ifdef CONFIG_DWMAC_SOCFPGA +extern const struct stmmac_of_data socfpga_gmac_data; +#endif extern struct platform_driver stmmac_pltfr_driver; static inline int stmmac_register_platform(void) { diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 078ad0ec8593..d940034acdd4 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -92,8 +92,8 @@ static int tc = TC_DEFAULT; module_param(tc, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(tc, "DMA threshold control value"); -#define DMA_BUFFER_SIZE BUF_SIZE_4KiB -static int buf_sz = DMA_BUFFER_SIZE; +#define DEFAULT_BUFSIZE 1536 +static int buf_sz = DEFAULT_BUFSIZE; module_param(buf_sz, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(buf_sz, "DMA buffer size"); @@ -136,8 +136,8 @@ static void stmmac_verify_args(void) dma_rxsize = DMA_RX_SIZE; if (unlikely(dma_txsize < 0)) dma_txsize = DMA_TX_SIZE; - if (unlikely((buf_sz < DMA_BUFFER_SIZE) || (buf_sz > BUF_SIZE_16KiB))) - buf_sz = DMA_BUFFER_SIZE; + if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB))) + buf_sz = DEFAULT_BUFSIZE; if (unlikely(flow_ctrl > 1)) flow_ctrl = FLOW_AUTO; else if (likely(flow_ctrl < 0)) @@ -286,10 +286,25 @@ bool stmmac_eee_init(struct stmmac_priv *priv) /* MAC core supports the EEE feature. */ if (priv->dma_cap.eee) { + int tx_lpi_timer = priv->tx_lpi_timer; + /* Check if the PHY supports EEE */ - if (phy_init_eee(priv->phydev, 1)) + if (phy_init_eee(priv->phydev, 1)) { + /* To manage at run-time if the EEE cannot be supported + * anymore (for example because the lp caps have been + * changed). + * In that case the driver disable own timers. 
+ */ + if (priv->eee_active) { + pr_debug("stmmac: disable EEE\n"); + del_timer_sync(&priv->eee_ctrl_timer); + priv->hw->mac->set_eee_timer(priv->ioaddr, 0, + tx_lpi_timer); + } + priv->eee_active = 0; goto out; - + } + /* Activate the EEE and start timers */ if (!priv->eee_active) { priv->eee_active = 1; init_timer(&priv->eee_ctrl_timer); @@ -300,13 +315,13 @@ bool stmmac_eee_init(struct stmmac_priv *priv) priv->hw->mac->set_eee_timer(priv->ioaddr, STMMAC_DEFAULT_LIT_LS, - priv->tx_lpi_timer); + tx_lpi_timer); } else /* Set HW EEE according to the speed */ priv->hw->mac->set_eee_pls(priv->ioaddr, priv->phydev->link); - pr_info("stmmac: Energy-Efficient Ethernet initialized\n"); + pr_debug("stmmac: Energy-Efficient Ethernet initialized\n"); ret = true; } @@ -886,10 +901,10 @@ static int stmmac_set_bfsize(int mtu, int bufsize) ret = BUF_SIZE_8KiB; else if (mtu >= BUF_SIZE_2KiB) ret = BUF_SIZE_4KiB; - else if (mtu >= DMA_BUFFER_SIZE) + else if (mtu > DEFAULT_BUFSIZE) ret = BUF_SIZE_2KiB; else - ret = DMA_BUFFER_SIZE; + ret = DEFAULT_BUFSIZE; return ret; } @@ -951,9 +966,9 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p, p->des2 = priv->rx_skbuff_dma[i]; - if ((priv->mode == STMMAC_RING_MODE) && + if ((priv->hw->mode->init_desc3) && (priv->dma_buf_sz == BUF_SIZE_16KiB)) - priv->hw->ring->init_desc3(p); + priv->hw->mode->init_desc3(p); return 0; } @@ -984,11 +999,8 @@ static int init_dma_desc_rings(struct net_device *dev) unsigned int bfsize = 0; int ret = -ENOMEM; - /* Set the max buffer size according to the DESC mode - * and the MTU. Note that RING mode allows 16KiB bsize. - */ - if (priv->mode == STMMAC_RING_MODE) - bfsize = priv->hw->ring->set_16kib_bfsize(dev->mtu); + if (priv->hw->mode->set_16kib_bfsize) + bfsize = priv->hw->mode->set_16kib_bfsize(dev->mtu); if (bfsize < BUF_SIZE_16KiB) bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz); @@ -1029,15 +1041,15 @@ static int init_dma_desc_rings(struct net_device *dev) /* Setup the chained descriptor addresses */ if (priv->mode == STMMAC_CHAIN_MODE) { if (priv->extend_desc) { - priv->hw->chain->init(priv->dma_erx, priv->dma_rx_phy, - rxsize, 1); - priv->hw->chain->init(priv->dma_etx, priv->dma_tx_phy, - txsize, 1); + priv->hw->mode->init(priv->dma_erx, priv->dma_rx_phy, + rxsize, 1); + priv->hw->mode->init(priv->dma_etx, priv->dma_tx_phy, + txsize, 1); } else { - priv->hw->chain->init(priv->dma_rx, priv->dma_rx_phy, - rxsize, 0); - priv->hw->chain->init(priv->dma_tx, priv->dma_tx_phy, - txsize, 0); + priv->hw->mode->init(priv->dma_rx, priv->dma_rx_phy, + rxsize, 0); + priv->hw->mode->init(priv->dma_tx, priv->dma_tx_phy, + txsize, 0); } } @@ -1288,10 +1300,10 @@ static void stmmac_tx_clean(struct stmmac_priv *priv) DMA_TO_DEVICE); priv->tx_skbuff_dma[entry] = 0; } - priv->hw->ring->clean_desc3(priv, p); + priv->hw->mode->clean_desc3(priv, p); if (likely(skb != NULL)) { - dev_kfree_skb(skb); + dev_consume_skb_any(skb); priv->tx_skbuff[entry] = NULL; } @@ -1844,6 +1856,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) int nfrags = skb_shinfo(skb)->nr_frags; struct dma_desc *desc, *first; unsigned int nopaged_len = skb_headlen(skb); + unsigned int enh_desc = priv->plat->enh_desc; if (unlikely(stmmac_tx_avail(priv) < nfrags + 1)) { if (!netif_queue_stopped(dev)) { @@ -1871,27 +1884,19 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) first = desc; /* To program the descriptors according to the size of the frame */ - if (priv->mode == 
STMMAC_RING_MODE) { - is_jumbo = priv->hw->ring->is_jumbo_frm(skb->len, - priv->plat->enh_desc); - if (unlikely(is_jumbo)) - entry = priv->hw->ring->jumbo_frm(priv, skb, - csum_insertion); - } else { - is_jumbo = priv->hw->chain->is_jumbo_frm(skb->len, - priv->plat->enh_desc); - if (unlikely(is_jumbo)) - entry = priv->hw->chain->jumbo_frm(priv, skb, - csum_insertion); - } + if (enh_desc) + is_jumbo = priv->hw->mode->is_jumbo_frm(skb->len, enh_desc); + if (likely(!is_jumbo)) { desc->des2 = dma_map_single(priv->device, skb->data, nopaged_len, DMA_TO_DEVICE); priv->tx_skbuff_dma[entry] = desc->des2; priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len, csum_insertion, priv->mode); - } else + } else { desc = first; + entry = priv->hw->mode->jumbo_frm(priv, skb, csum_insertion); + } for (i = 0; i < nfrags; i++) { const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; @@ -2029,7 +2034,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv) p->des2 = priv->rx_skbuff_dma[entry]; - priv->hw->ring->refill_desc3(priv, p); + priv->hw->mode->refill_desc3(priv, p); if (netif_msg_rx_status(priv)) pr_debug("\trefill entry #%d\n", entry); @@ -2633,11 +2638,11 @@ static int stmmac_hw_init(struct stmmac_priv *priv) /* To use the chained or ring mode */ if (chain_mode) { - priv->hw->chain = &chain_mode_ops; + priv->hw->mode = &chain_mode_ops; pr_info(" Chain mode enabled\n"); priv->mode = STMMAC_CHAIN_MODE; } else { - priv->hw->ring = &ring_mode_ops; + priv->hw->mode = &ring_mode_ops; pr_info(" Ring mode enabled\n"); priv->mode = STMMAC_RING_MODE; } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index c61bc72b8e90..46aef5108bea 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -36,7 +36,10 @@ static const struct of_device_id stmmac_dt_ids[] = { #ifdef CONFIG_DWMAC_STI { .compatible = "st,stih415-dwmac", .data = &sti_gmac_data}, { .compatible = "st,stih416-dwmac", .data = &sti_gmac_data}, - { .compatible = "st,stih127-dwmac", .data = &sti_gmac_data}, + { .compatible = "st,stid127-dwmac", .data = &sti_gmac_data}, +#endif +#ifdef CONFIG_DWMAC_SOCFPGA + { .compatible = "altr,socfpga-stmmac", .data = &socfpga_gmac_data }, #endif /* SoC specific glue layers should come before generic bindings */ { .compatible = "st,spear600-gmac"}, diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c index 7680581ebe12..b7ad3565566c 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c @@ -164,6 +164,7 @@ static struct ptp_clock_info stmmac_ptp_clock_ops = { .n_alarm = 0, .n_ext_ts = 0, .n_per_out = 0, + .n_pins = 0, .pps = 0, .adjfreq = stmmac_adjust_freq, .adjtime = stmmac_adjust_time, diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c index 8e2266e1f260..79606f47a08e 100644 --- a/drivers/net/ethernet/sun/niu.c +++ b/drivers/net/ethernet/sun/niu.c @@ -9041,7 +9041,7 @@ static void niu_try_msix(struct niu *np, u8 *ldg_num_map) struct msix_entry msi_vec[NIU_NUM_LDG]; struct niu_parent *parent = np->parent; struct pci_dev *pdev = np->pdev; - int i, num_irqs, err; + int i, num_irqs; u8 first_ldg; first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port; @@ -9053,21 +9053,16 @@ static void niu_try_msix(struct niu *np, u8 *ldg_num_map) (np->port == 0 ? 
3 : 1)); BUG_ON(num_irqs > (NIU_NUM_LDG / parent->num_ports)); -retry: for (i = 0; i < num_irqs; i++) { msi_vec[i].vector = 0; msi_vec[i].entry = i; } - err = pci_enable_msix(pdev, msi_vec, num_irqs); - if (err < 0) { + num_irqs = pci_enable_msix_range(pdev, msi_vec, 1, num_irqs); + if (num_irqs < 0) { np->flags &= ~NIU_FLAGS_MSIX; return; } - if (err > 0) { - num_irqs = err; - goto retry; - } np->flags |= NIU_FLAGS_MSIX; for (i = 0; i < num_irqs; i++) diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c index c2799dc46325..102a66fc54a2 100644 --- a/drivers/net/ethernet/sun/sungem.c +++ b/drivers/net/ethernet/sun/sungem.c @@ -688,7 +688,7 @@ static __inline__ void gem_tx(struct net_device *dev, struct gem *gp, u32 gem_st } dev->stats.tx_packets++; - dev_kfree_skb(skb); + dev_consume_skb_any(skb); } gp->tx_old = entry; diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index ffd4d12acf6d..5d5fec6c4eb0 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -378,7 +378,6 @@ struct cpsw_priv { u32 version; u32 coal_intvl; u32 bus_freq_mhz; - struct net_device_stats stats; int rx_packet_max; int host_port; struct clk *clk; @@ -673,8 +672,8 @@ static void cpsw_tx_handler(void *token, int len, int status) if (unlikely(netif_queue_stopped(ndev))) netif_wake_queue(ndev); cpts_tx_timestamp(priv->cpts, skb); - priv->stats.tx_packets++; - priv->stats.tx_bytes += len; + ndev->stats.tx_packets++; + ndev->stats.tx_bytes += len; dev_kfree_skb_any(skb); } @@ -700,10 +699,10 @@ static void cpsw_rx_handler(void *token, int len, int status) cpts_rx_timestamp(priv->cpts, skb); skb->protocol = eth_type_trans(skb, ndev); netif_receive_skb(skb); - priv->stats.rx_bytes += len; - priv->stats.rx_packets++; + ndev->stats.rx_bytes += len; + ndev->stats.rx_packets++; } else { - priv->stats.rx_dropped++; + ndev->stats.rx_dropped++; new_skb = skb; } @@ -1313,7 +1312,7 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb, if (skb_padto(skb, CPSW_MIN_PACKET_SIZE)) { cpsw_err(priv, tx_err, "packet pad failed\n"); - priv->stats.tx_dropped++; + ndev->stats.tx_dropped++; return NETDEV_TX_OK; } @@ -1337,7 +1336,7 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb, return NETDEV_TX_OK; fail: - priv->stats.tx_dropped++; + ndev->stats.tx_dropped++; netif_stop_queue(ndev); return NETDEV_TX_BUSY; } @@ -1477,7 +1476,6 @@ static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr) static int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd) { struct cpsw_priv *priv = netdev_priv(dev); - struct mii_ioctl_data *data = if_mii(req); int slave_no = cpsw_slave_index(priv); if (!netif_running(dev)) @@ -1490,14 +1488,11 @@ static int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd) case SIOCGHWTSTAMP: return cpsw_hwtstamp_get(dev, req); #endif - case SIOCGMIIPHY: - data->phy_id = priv->slaves[slave_no].phy->addr; - break; - default: - return -ENOTSUPP; } - return 0; + if (!priv->slaves[slave_no].phy) + return -EOPNOTSUPP; + return phy_mii_ioctl(priv->slaves[slave_no].phy, req, cmd); } static void cpsw_ndo_tx_timeout(struct net_device *ndev) @@ -1505,7 +1500,7 @@ static void cpsw_ndo_tx_timeout(struct net_device *ndev) struct cpsw_priv *priv = netdev_priv(ndev); cpsw_err(priv, tx_err, "transmit timeout, restarting dma\n"); - priv->stats.tx_errors++; + ndev->stats.tx_errors++; cpsw_intr_disable(priv); cpdma_ctlr_int_ctrl(priv->dma, false); cpdma_chan_stop(priv->txch); @@ -1544,12 +1539,6 @@ 
static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p) return 0; } -static struct net_device_stats *cpsw_ndo_get_stats(struct net_device *ndev) -{ - struct cpsw_priv *priv = netdev_priv(ndev); - return &priv->stats; -} - #ifdef CONFIG_NET_POLL_CONTROLLER static void cpsw_ndo_poll_controller(struct net_device *ndev) { @@ -1642,7 +1631,6 @@ static const struct net_device_ops cpsw_netdev_ops = { .ndo_validate_addr = eth_validate_addr, .ndo_change_mtu = eth_change_mtu, .ndo_tx_timeout = cpsw_ndo_tx_timeout, - .ndo_get_stats = cpsw_ndo_get_stats, .ndo_set_rx_mode = cpsw_ndo_set_rx_mode, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = cpsw_ndo_poll_controller, @@ -2229,10 +2217,6 @@ static int cpsw_probe(struct platform_device *pdev) goto clean_ale_ret; } - if (cpts_register(&pdev->dev, priv->cpts, - data->cpts_clock_mult, data->cpts_clock_shift)) - dev_err(priv->dev, "error registering cpts device\n"); - cpsw_notice(priv, probe, "initialized device (regs %pa, irq %d)\n", &ss_res->start, ndev->irq); diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c index 8c351f100aca..a3bbf59eaafd 100644 --- a/drivers/net/ethernet/ti/cpts.c +++ b/drivers/net/ethernet/ti/cpts.c @@ -31,10 +31,6 @@ #ifdef CONFIG_TI_CPTS -static struct sock_filter ptp_filter[] = { - PTP_FILTER -}; - #define cpts_read32(c, r) __raw_readl(&c->reg->r) #define cpts_write32(c, v, r) __raw_writel(v, &c->reg->r) @@ -217,6 +213,7 @@ static struct ptp_clock_info cpts_info = { .name = "CTPS timer", .max_adj = 1000000, .n_ext_ts = 0, + .n_pins = 0, .pps = 0, .adjfreq = cpts_ptp_adjfreq, .adjtime = cpts_ptp_adjtime, @@ -300,7 +297,7 @@ static u64 cpts_find_ts(struct cpts *cpts, struct sk_buff *skb, int ev_type) u64 ns = 0; struct cpts_event *event; struct list_head *this, *next; - unsigned int class = sk_run_filter(skb, ptp_filter); + unsigned int class = ptp_classify_raw(skb); unsigned long flags; u16 seqid; u8 mtype; @@ -371,10 +368,6 @@ int cpts_register(struct device *dev, struct cpts *cpts, int err, i; unsigned long flags; - if (ptp_filter_init(ptp_filter, ARRAY_SIZE(ptp_filter))) { - pr_err("cpts: bad ptp filter\n"); - return -EINVAL; - } cpts->info = cpts_info; cpts->clock = ptp_clock_register(&cpts->info, dev); if (IS_ERR(cpts->clock)) { diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c index 364d0c7952c0..88ef27067bf2 100644 --- a/drivers/net/ethernet/ti/davinci_cpdma.c +++ b/drivers/net/ethernet/ti/davinci_cpdma.c @@ -355,7 +355,7 @@ int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr) int i; spin_lock_irqsave(&ctlr->lock, flags); - if (ctlr->state != CPDMA_STATE_ACTIVE) { + if (ctlr->state == CPDMA_STATE_TEARDOWN) { spin_unlock_irqrestore(&ctlr->lock, flags); return -EINVAL; } @@ -891,7 +891,7 @@ int cpdma_chan_stop(struct cpdma_chan *chan) unsigned timeout; spin_lock_irqsave(&chan->lock, flags); - if (chan->state != CPDMA_STATE_ACTIVE) { + if (chan->state == CPDMA_STATE_TEARDOWN) { spin_unlock_irqrestore(&chan->lock, flags); return -EINVAL; } diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c index cd9b164a0434..8f0e69ce07ca 100644 --- a/drivers/net/ethernet/ti/davinci_emac.c +++ b/drivers/net/ethernet/ti/davinci_emac.c @@ -1532,9 +1532,9 @@ static int emac_dev_open(struct net_device *ndev) struct device *emac_dev = &ndev->dev; u32 cnt; struct resource *res; - int ret; + int q, m, ret; + int res_num = 0, irq_num = 0; int i = 0; - int k = 0; struct emac_priv *priv = netdev_priv(ndev); 
pm_runtime_get(&priv->pdev->dev); @@ -1564,15 +1564,24 @@ static int emac_dev_open(struct net_device *ndev) } /* Request IRQ */ + while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, + res_num))) { + for (irq_num = res->start; irq_num <= res->end; irq_num++) { + dev_err(emac_dev, "Request IRQ %d\n", irq_num); + if (request_irq(irq_num, emac_irq, 0, ndev->name, + ndev)) { + dev_err(emac_dev, + "DaVinci EMAC: request_irq() failed\n"); + ret = -EBUSY; - while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k))) { - for (i = res->start; i <= res->end; i++) { - if (devm_request_irq(&priv->pdev->dev, i, emac_irq, - 0, ndev->name, ndev)) goto rollback; + } } - k++; + res_num++; } + /* prepare counters for rollback in case of an error */ + res_num--; + irq_num--; /* Start/Enable EMAC hardware */ emac_hw_enable(priv); @@ -1639,11 +1648,23 @@ static int emac_dev_open(struct net_device *ndev) return 0; -rollback: - - dev_err(emac_dev, "DaVinci EMAC: devm_request_irq() failed"); - ret = -EBUSY; err: + emac_int_disable(priv); + napi_disable(&priv->napi); + +rollback: + for (q = res_num; q >= 0; q--) { + res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, q); + /* at the first iteration, irq_num is already set to the + * right value + */ + if (q != res_num) + irq_num = res->end; + + for (m = irq_num; m >= res->start; m--) + free_irq(m, ndev); + } + cpdma_ctlr_stop(priv->dma); pm_runtime_put(&priv->pdev->dev); return ret; } @@ -1659,6 +1680,9 @@ err: */ static int emac_dev_stop(struct net_device *ndev) { + struct resource *res; + int i = 0; + int irq_num; struct emac_priv *priv = netdev_priv(ndev); struct device *emac_dev = &ndev->dev; @@ -1674,6 +1698,13 @@ static int emac_dev_stop(struct net_device *ndev) if (priv->phydev) phy_disconnect(priv->phydev); + /* Free IRQ */ + while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, i))) { + for (irq_num = res->start; irq_num <= res->end; irq_num++) + free_irq(irq_num, priv->ndev); + i++; + } + if (netif_msg_drv(priv)) dev_notice(emac_dev, "DaVinci EMAC: %s stopped\n", ndev->name); diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c index 17503da9f7a5..7e1c91d41a87 100644 --- a/drivers/net/ethernet/tile/tilegx.c +++ b/drivers/net/ethernet/tile/tilegx.c @@ -659,6 +659,9 @@ static int tile_net_poll(struct napi_struct *napi, int budget) struct info_mpipe *info_mpipe = container_of(napi, struct info_mpipe, napi); + if (budget <= 0) + goto done; + instance = info_mpipe->instance; while ((n = gxio_mpipe_iqueue_try_peek( &info_mpipe->iqueue, @@ -870,6 +873,7 @@ static struct ptp_clock_info ptp_mpipe_caps = { .name = "mPIPE clock", .max_adj = 999999999, .n_ext_ts = 0, + .n_pins = 0, .pps = 0, .adjfreq = ptp_mpipe_adjfreq, .adjtime = ptp_mpipe_adjtime, diff --git a/drivers/net/ethernet/tile/tilepro.c b/drivers/net/ethernet/tile/tilepro.c index edb2e12a0fe2..e5a5c5d4ce0c 100644 --- a/drivers/net/ethernet/tile/tilepro.c +++ b/drivers/net/ethernet/tile/tilepro.c @@ -831,6 +831,9 @@ static int tile_net_poll(struct napi_struct *napi, int budget) unsigned int work = 0; + if (budget <= 0) + goto done; + while (priv->active) { int index = qup->__packet_receive_read; if (index == qsp->__packet_receive_queue.__packet_write) @@ -1821,7 +1824,7 @@ busy: /* Handle completions. */ for (i = 0; i < nolds; i++) - kfree_skb(olds[i]); + dev_consume_skb_any(olds[i]); /* Update stats. */ u64_stats_update_begin(&stats->syncp); @@ -2005,7 +2008,7 @@ busy: /* Handle completions. 
*/ for (i = 0; i < nolds; i++) - kfree_skb(olds[i]); + dev_consume_skb_any(olds[i]); /* HACK: Track "expanded" size for short packets (e.g. 42 < 60). */ u64_stats_update_begin(&stats->syncp); @@ -2068,14 +2071,14 @@ static struct rtnl_link_stats64 *tile_net_get_stats64(struct net_device *dev, cpu_stats = &priv->cpu[i]->stats; do { - start = u64_stats_fetch_begin_bh(&cpu_stats->syncp); + start = u64_stats_fetch_begin_irq(&cpu_stats->syncp); trx_packets = cpu_stats->rx_packets; ttx_packets = cpu_stats->tx_packets; trx_bytes = cpu_stats->rx_bytes; ttx_bytes = cpu_stats->tx_bytes; trx_errors = cpu_stats->rx_errors; trx_dropped = cpu_stats->rx_dropped; - } while (u64_stats_fetch_retry_bh(&cpu_stats->syncp, start)); + } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start)); rx_packets += trx_packets; tx_packets += ttx_packets; diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c index 3f4a32e39d27..0282d0161859 100644 --- a/drivers/net/ethernet/toshiba/spider_net.c +++ b/drivers/net/ethernet/toshiba/spider_net.c @@ -860,7 +860,7 @@ spider_net_release_tx_chain(struct spider_net_card *card, int brutal) if (skb) { pci_unmap_single(card->pdev, buf_addr, skb->len, PCI_DMA_TODEVICE); - dev_kfree_skb(skb); + dev_consume_skb_any(skb); } } return 0; diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c index 88e9c73cebc0..fef5573dbfca 100644 --- a/drivers/net/ethernet/toshiba/tc35815.c +++ b/drivers/net/ethernet/toshiba/tc35815.c @@ -1645,6 +1645,9 @@ static int tc35815_poll(struct napi_struct *napi, int budget) int received = 0, handled; u32 status; + if (budget <= 0) + return received; + spin_lock(&lp->rx_lock); status = tc_readl(&tr->Int_Src); do { diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c index ef312bc6b865..f61dc2b72bb2 100644 --- a/drivers/net/ethernet/via/via-rhine.c +++ b/drivers/net/ethernet/via/via-rhine.c @@ -923,7 +923,7 @@ static int rhine_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (rc) { dev_err(&pdev->dev, "32-bit PCI DMA addresses not supported by the card!?\n"); - goto err_out; + goto err_out_pci_disable; } /* sanity check */ @@ -931,7 +931,7 @@ static int rhine_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) (pci_resource_len(pdev, 1) < io_size)) { rc = -EIO; dev_err(&pdev->dev, "Insufficient PCI resources, aborting\n"); - goto err_out; + goto err_out_pci_disable; } pioaddr = pci_resource_start(pdev, 0); @@ -942,7 +942,7 @@ static int rhine_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) dev = alloc_etherdev(sizeof(struct rhine_private)); if (!dev) { rc = -ENOMEM; - goto err_out; + goto err_out_pci_disable; } SET_NETDEV_DEV(dev, &pdev->dev); @@ -1022,7 +1022,7 @@ static int rhine_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) /* The chip-specific entries in the device structure. */ dev->netdev_ops = &rhine_netdev_ops; - dev->ethtool_ops = &netdev_ethtool_ops, + dev->ethtool_ops = &netdev_ethtool_ops; dev->watchdog_timeo = TX_TIMEOUT; netif_napi_add(dev, &rp->napi, rhine_napipoll, 64); @@ -1084,6 +1084,8 @@ err_out_free_res: pci_release_regions(pdev); err_out_free_netdev: free_netdev(dev); +err_out_pci_disable: + pci_disable_device(pdev); err_out: return rc; } @@ -1676,7 +1678,7 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb, /* Must use alignment buffer. 
*/ if (skb->len > PKT_BUF_SZ) { /* packet too long, drop it */ - dev_kfree_skb(skb); + dev_kfree_skb_any(skb); rp->tx_skbuff[entry] = NULL; dev->stats.tx_dropped++; return NETDEV_TX_OK; @@ -1696,7 +1698,7 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb, pci_map_single(rp->pdev, skb->data, skb->len, PCI_DMA_TODEVICE); if (dma_mapping_error(&rp->pdev->dev, rp->tx_skbuff_dma[entry])) { - dev_kfree_skb(skb); + dev_kfree_skb_any(skb); rp->tx_skbuff_dma[entry] = 0; dev->stats.tx_dropped++; return NETDEV_TX_OK; @@ -1834,7 +1836,7 @@ static void rhine_tx(struct net_device *dev) rp->tx_skbuff[entry]->len, PCI_DMA_TODEVICE); } - dev_kfree_skb(rp->tx_skbuff[entry]); + dev_consume_skb_any(rp->tx_skbuff[entry]); rp->tx_skbuff[entry] = NULL; entry = (++rp->dirty_tx) % TX_RING_SIZE; } @@ -2070,16 +2072,16 @@ rhine_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) netdev_stats_to_stats64(stats, &dev->stats); do { - start = u64_stats_fetch_begin_bh(&rp->rx_stats.syncp); + start = u64_stats_fetch_begin_irq(&rp->rx_stats.syncp); stats->rx_packets = rp->rx_stats.packets; stats->rx_bytes = rp->rx_stats.bytes; - } while (u64_stats_fetch_retry_bh(&rp->rx_stats.syncp, start)); + } while (u64_stats_fetch_retry_irq(&rp->rx_stats.syncp, start)); do { - start = u64_stats_fetch_begin_bh(&rp->tx_stats.syncp); + start = u64_stats_fetch_begin_irq(&rp->tx_stats.syncp); stats->tx_packets = rp->tx_stats.packets; stats->tx_bytes = rp->tx_stats.bytes; - } while (u64_stats_fetch_retry_bh(&rp->tx_stats.syncp, start)); + } while (u64_stats_fetch_retry_irq(&rp->tx_stats.syncp, start)); return stats; } diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c index ad61d26a44f3..de08e86db209 100644 --- a/drivers/net/ethernet/via/via-velocity.c +++ b/drivers/net/ethernet/via/via-velocity.c @@ -2565,7 +2565,7 @@ static netdev_tx_t velocity_xmit(struct sk_buff *skb, /* The hardware can handle at most 7 memory segments, so merge * the skb if there are more */ if (skb_shinfo(skb)->nr_frags > 6 && __skb_linearize(skb)) { - kfree_skb(skb); + dev_kfree_skb_any(skb); return NETDEV_TX_OK; } diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c index 0df36c6ec7f4..104d46f37969 100644 --- a/drivers/net/ethernet/wiznet/w5100.c +++ b/drivers/net/ethernet/wiznet/w5100.c @@ -641,11 +641,10 @@ static int w5100_hw_probe(struct platform_device *pdev) if (!mem) return -ENXIO; mem_size = resource_size(mem); - if (!devm_request_mem_region(&pdev->dev, mem->start, mem_size, name)) - return -EBUSY; - priv->base = devm_ioremap(&pdev->dev, mem->start, mem_size); - if (!priv->base) - return -EBUSY; + + priv->base = devm_ioremap_resource(&pdev->dev, mem); + if (IS_ERR(priv->base)) + return PTR_ERR(priv->base); spin_lock_init(&priv->reg_lock); priv->indirect = mem_size < W5100_BUS_DIRECT_SIZE; diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c index 71c27b3292f1..1f33c4c86c20 100644 --- a/drivers/net/ethernet/wiznet/w5300.c +++ b/drivers/net/ethernet/wiznet/w5300.c @@ -561,11 +561,10 @@ static int w5300_hw_probe(struct platform_device *pdev) if (!mem) return -ENXIO; mem_size = resource_size(mem); - if (!devm_request_mem_region(&pdev->dev, mem->start, mem_size, name)) - return -EBUSY; - priv->base = devm_ioremap(&pdev->dev, mem->start, mem_size); - if (!priv->base) - return -EBUSY; + + priv->base = devm_ioremap_resource(&pdev->dev, mem); + if (IS_ERR(priv->base)) + return PTR_ERR(priv->base); spin_lock_init(&priv->reg_lock); 
priv->indirect = mem_size < W5300_BUS_DIRECT_SIZE; diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c index a4347508031c..fa193c4688da 100644 --- a/drivers/net/ethernet/xilinx/ll_temac_main.c +++ b/drivers/net/ethernet/xilinx/ll_temac_main.c @@ -771,8 +771,8 @@ static void ll_temac_recv(struct net_device *ndev) /* if we're doing rx csum offload, set it up */ if (((lp->temac_features & TEMAC_FEATURE_RX_CSUM) != 0) && - (skb->protocol == __constant_htons(ETH_P_IP)) && - (skb->len > 64)) { + (skb->protocol == htons(ETH_P_IP)) && + (skb->len > 64)) { skb->csum = cur_p->app3 & 0xFFFF; skb->ip_summed = CHECKSUM_COMPLETE; diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c index 4bfdf8c7ada0..7b0a73556264 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c @@ -756,7 +756,7 @@ static void axienet_recv(struct net_device *ndev) skb->ip_summed = CHECKSUM_UNNECESSARY; } } else if ((lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) != 0 && - skb->protocol == __constant_htons(ETH_P_IP) && + skb->protocol == htons(ETH_P_IP) && skb->len > 64) { skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF); skb->ip_summed = CHECKSUM_COMPLETE; diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c index 36052b98b3fc..0d87c67a5ff7 100644 --- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c +++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c @@ -795,18 +795,6 @@ static int xemaclite_mdio_write(struct mii_bus *bus, int phy_id, int reg, } /** - * xemaclite_mdio_reset - Reset the mdio bus. - * @bus: Pointer to the MII bus - * - * This function is required(?) as per Documentation/networking/phy.txt. - * There is no reset in this device; this function always returns 0. - */ -static int xemaclite_mdio_reset(struct mii_bus *bus) -{ - return 0; -} - -/** * xemaclite_mdio_setup - Register mii_bus for the Emaclite device * @lp: Pointer to the Emaclite device private data * @ofdev: Pointer to OF device structure @@ -861,7 +849,6 @@ static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev) bus->name = "Xilinx Emaclite MDIO"; bus->read = xemaclite_mdio_read; bus->write = xemaclite_mdio_write; - bus->reset = xemaclite_mdio_reset; bus->parent = dev; bus->irq = lp->mdio_irqs; /* preallocated IRQ table */ @@ -1037,7 +1024,7 @@ static int xemaclite_send(struct sk_buff *orig_skb, struct net_device *dev) skb_tx_timestamp(new_skb); dev->stats.tx_bytes += len; - dev_kfree_skb(new_skb); + dev_consume_skb_any(new_skb); return 0; } diff --git a/drivers/net/ethernet/xscale/Kconfig b/drivers/net/ethernet/xscale/Kconfig index 3f431019e615..b81bc9fca378 100644 --- a/drivers/net/ethernet/xscale/Kconfig +++ b/drivers/net/ethernet/xscale/Kconfig @@ -23,6 +23,7 @@ config IXP4XX_ETH tristate "Intel IXP4xx Ethernet support" depends on ARM && ARCH_IXP4XX && IXP4XX_NPE && IXP4XX_QMGR select PHYLIB + select NET_PTP_CLASSIFY ---help--- Say Y here if you want to use built-in Ethernet ports on IXP4xx processor. 
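The w5100 and w5300 hunks above collapse the devm_request_mem_region()/devm_ioremap() pair into a single devm_ioremap_resource() call, trading the NULL-on-failure convention for ERR_PTR() error codes. The following is a minimal sketch of the resulting probe pattern, not code from either driver; foo_probe(), struct foo_priv and the member names are invented for illustration.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct foo_priv {
	void __iomem *base;	/* mapped register window */
};

static int foo_probe(struct platform_device *pdev)
{
	struct foo_priv *priv;
	struct resource *mem;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	/* devm_ioremap_resource() validates the resource, requests the
	 * memory region and maps it in one step, so the old separate
	 * -ENXIO/-EBUSY failure paths collapse into a single PTR_ERR(). */
	priv->base = devm_ioremap_resource(&pdev->dev, mem);
	if (IS_ERR(priv->base))
		return PTR_ERR(priv->base);

	platform_set_drvdata(pdev, priv);
	return 0;
}

Because the mapping is device-managed, no matching release is needed in the remove path, which is why the hunks above only delete code without adding cleanup.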
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c index 25283f17d82f..f7e0f0f7c2e2 100644 --- a/drivers/net/ethernet/xscale/ixp4xx_eth.c +++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c @@ -256,10 +256,6 @@ static int ports_open; static struct port *npe_port_tab[MAX_NPES]; static struct dma_pool *dma_pool; -static struct sock_filter ptp_filter[] = { - PTP_FILTER -}; - static int ixp_ptp_match(struct sk_buff *skb, u16 uid_hi, u32 uid_lo, u16 seqid) { u8 *data = skb->data; @@ -267,7 +263,7 @@ static int ixp_ptp_match(struct sk_buff *skb, u16 uid_hi, u32 uid_lo, u16 seqid) u16 *hi, *id; u32 lo; - if (sk_run_filter(skb, ptp_filter) != PTP_CLASS_V1_IPV4) + if (ptp_classify_raw(skb) != PTP_CLASS_V1_IPV4) return 0; offset = ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN; @@ -1413,11 +1409,6 @@ static int eth_init_one(struct platform_device *pdev) char phy_id[MII_BUS_ID_SIZE + 3]; int err; - if (ptp_filter_init(ptp_filter, ARRAY_SIZE(ptp_filter))) { - pr_err("ixp4xx_eth: bad ptp filter\n"); - return -EINVAL; - } - if (!(dev = alloc_etherdev(sizeof(struct port)))) return -ENOMEM;
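The cpts and ixp4xx_eth hunks, together with the select NET_PTP_CLASSIFY added to the IXP4xx Kconfig entry, retire each driver's private sock_filter copy of PTP_FILTER and its ptp_filter_init() setup in favor of the shared ptp_classify_raw() helper. A hedged sketch of how a receive path consumes the classifier; foo_rx_timestamp() is an invented name and its body is schematic.

#include <linux/ptp_classify.h>
#include <linux/skbuff.h>

static void foo_rx_timestamp(struct sk_buff *skb)
{
	/* One classifier shared by all drivers replaces the per-driver
	 * BPF filter tables removed in the hunks above. */
	unsigned int class = ptp_classify_raw(skb);

	if (class == PTP_CLASS_NONE)
		return;	/* not a PTP event frame, nothing to stamp */

	if (class == PTP_CLASS_V1_IPV4) {
		/* match the sequence id and attach the hardware
		 * timestamp, as ixp_ptp_match() still does */
	}
}

The shared classifier is initialized once by the networking core, which is also why the drivers can drop their -EINVAL error handling for a failed ptp_filter_init().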