path: root/drivers/net/ethernet
author     Linus Torvalds <torvalds@linux-foundation.org>  2018-04-03 06:20:12 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>  2018-04-03 06:20:12 +0300
commit     f5a8eb632b562bd9c16c389f5db3a5260fba4157 (patch)
tree       82687234d772ff8f72a31e598fe16553885c56c9 /drivers/net/ethernet
parent     c9297d284126b80c9cfd72c690e0da531c99fc48 (diff)
parent     dd3b8c329aa270027fba61a02a12600972dc3983 (diff)
download   linux-f5a8eb632b562bd9c16c389f5db3a5260fba4157.tar.xz
Merge tag 'arch-removal' of git://git.kernel.org/pub/scm/linux/kernel/git/arnd/asm-generic
Pull removal of obsolete architecture ports from Arnd Bergmann:

 "This removes the entire architecture code for blackfin, cris, frv, m32r, metag, mn10300, score, and tile, including the associated device drivers.

  I have been working with the (former) maintainers for each one to ensure that my interpretation was right and the code is definitely unused in mainline kernels. Many had fond memories of working on the respective ports to start with and getting them included in upstream, but also saw no point in keeping the port alive without any users.

  In the end, it seems that while the eight architectures are extremely different, they all suffered the same fate: There was one company in charge of an SoC line, a CPU microarchitecture and a software ecosystem, which was more costly than licensing newer off-the-shelf CPU cores from a third party (typically ARM, MIPS, or RISC-V). It seems that all the SoC product lines are still around, but have not used the custom CPU architectures for several years at this point. In contrast, CPU instruction sets that remain popular and have actively maintained kernel ports tend to all be used across multiple licensees.

  [ See the new nds32 port merged in the previous commit for the next generation of "one company in charge of an SoC line, a CPU microarchitecture and a software ecosystem" - Linus ]

  The removal came out of a discussion that is now documented at https://lwn.net/Articles/748074/. Unlike the original plans, I'm not marking any ports as deprecated but remove them all at once after I made sure that they are all unused. Some architectures (notably tile, mn10300, and blackfin) are still being shipped in products with old kernels, but those products will never be updated to newer kernel releases.

  After this series, we still have a few architectures without mainline gcc support:

   - unicore32 and hexagon both have very outdated gcc releases, but the maintainers promised to work on providing something newer. At least in case of hexagon, this will only be llvm, not gcc.

   - openrisc, risc-v and nds32 are still in the process of finishing their support or getting it added to mainline gcc in the first place. They all have patched gcc-7.3 ports that work to some degree, but complete upstream support won't happen before gcc-8.1. Csky posted their first kernel patch set last week, their situation will be similar.

  [ Palmer Dabbelt points out that RISC-V support is in mainline gcc since gcc-7, although gcc-7.3.0 is the recommended minimum - Linus ]"

This really says it all:

 2498 files changed, 95 insertions(+), 467668 deletions(-)

* tag 'arch-removal' of git://git.kernel.org/pub/scm/linux/kernel/git/arnd/asm-generic: (74 commits)
  MAINTAINERS: UNICORE32: Change email account
  staging: iio: remove iio-trig-bfin-timer driver
  tty: hvc: remove tile driver
  tty: remove bfin_jtag_comm and hvc_bfin_jtag drivers
  serial: remove tile uart driver
  serial: remove m32r_sio driver
  serial: remove blackfin drivers
  serial: remove cris/etrax uart drivers
  usb: Remove Blackfin references in USB support
  usb: isp1362: remove blackfin arch glue
  usb: musb: remove blackfin port
  usb: host: remove tilegx platform glue
  pwm: remove pwm-bfin driver
  i2c: remove bfin-twi driver
  spi: remove blackfin related host drivers
  watchdog: remove bfin_wdt driver
  can: remove bfin_can driver
  mmc: remove bfin_sdh driver
  input: misc: remove blackfin rotary driver
  input: keyboard: remove bf54x driver
  ...
Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r--  drivers/net/ethernet/8390/Kconfig        3
-rw-r--r--  drivers/net/ethernet/8390/ne.c          23
-rw-r--r--  drivers/net/ethernet/Kconfig             2
-rw-r--r--  drivers/net/ethernet/Makefile            2
-rw-r--r--  drivers/net/ethernet/adi/Kconfig        66
-rw-r--r--  drivers/net/ethernet/adi/Makefile        5
-rw-r--r--  drivers/net/ethernet/adi/bfin_mac.c   1881
-rw-r--r--  drivers/net/ethernet/adi/bfin_mac.h    104
-rw-r--r--  drivers/net/ethernet/davicom/Kconfig     2
-rw-r--r--  drivers/net/ethernet/smsc/Kconfig       10
-rw-r--r--  drivers/net/ethernet/smsc/smc91x.h      26
-rw-r--r--  drivers/net/ethernet/tile/Kconfig       18
-rw-r--r--  drivers/net/ethernet/tile/Makefile      11
-rw-r--r--  drivers/net/ethernet/tile/tilegx.c    2279
-rw-r--r--  drivers/net/ethernet/tile/tilepro.c   2397
15 files changed, 9 insertions(+), 6820 deletions(-)
diff --git a/drivers/net/ethernet/8390/Kconfig b/drivers/net/ethernet/8390/Kconfig
index fdc673484add..9fee7c83ef9f 100644
--- a/drivers/net/ethernet/8390/Kconfig
+++ b/drivers/net/ethernet/8390/Kconfig
@@ -87,8 +87,7 @@ config MCF8390
config NE2000
tristate "NE2000/NE1000 support"
- depends on (ISA || (Q40 && m) || M32R || MACH_TX49XX || \
- ATARI_ETHERNEC)
+ depends on (ISA || (Q40 && m) || MACH_TX49XX || ATARI_ETHERNEC)
select CRC32
---help---
If you have a network (Ethernet) card of this type, say Y here.
diff --git a/drivers/net/ethernet/8390/ne.c b/drivers/net/ethernet/8390/ne.c
index 66f47987e2a2..4e05953c4fbc 100644
--- a/drivers/net/ethernet/8390/ne.c
+++ b/drivers/net/ethernet/8390/ne.c
@@ -99,7 +99,7 @@ MODULE_LICENSE("GPL");
that the ne2k probe is the last 8390 based probe to take place (as it
is at boot) and so the probe will get confused by any other 8390 cards.
ISA device autoprobes on a running machine are not recommended anyway. */
-#if !defined(MODULE) && (defined(CONFIG_ISA) || defined(CONFIG_M32R))
+#if !defined(MODULE) && defined(CONFIG_ISA)
/* Do we need a portlist for the ISA auto-probe ? */
#define NEEDS_PORTLIST
#endif
@@ -164,12 +164,7 @@ bad_clone_list[] __initdata = {
#define NESM_START_PG 0x40 /* First page of TX buffer */
#define NESM_STOP_PG 0x80 /* Last page +1 of RX ring */
-#if defined(CONFIG_PLAT_MAPPI)
-# define DCR_VAL 0x4b
-#elif defined(CONFIG_PLAT_OAKS32R) || \
- defined(CONFIG_MACH_TX49XX)
-# define DCR_VAL 0x48 /* 8-bit mode */
-#elif defined(CONFIG_ATARI) /* 8-bit mode on Atari, normal on Q40 */
+#if defined(CONFIG_ATARI) /* 8-bit mode on Atari, normal on Q40 */
# define DCR_VAL (MACH_IS_ATARI ? 0x48 : 0x49)
#else
# define DCR_VAL 0x49
@@ -422,12 +417,7 @@ static int __init ne_probe1(struct net_device *dev, unsigned long ioaddr)
stop_page = NE1SM_STOP_PG;
}
-#if defined(CONFIG_PLAT_MAPPI) || defined(CONFIG_PLAT_OAKS32R)
- neX000 = ((SA_prom[14] == 0x57 && SA_prom[15] == 0x57)
- || (SA_prom[14] == 0x42 && SA_prom[15] == 0x42));
-#else
neX000 = (SA_prom[14] == 0x57 && SA_prom[15] == 0x57);
-#endif
ctron = (SA_prom[0] == 0x00 && SA_prom[1] == 0x00 && SA_prom[2] == 0x1d);
copam = (SA_prom[14] == 0x49 && SA_prom[15] == 0x00);
@@ -508,18 +498,9 @@ static int __init ne_probe1(struct net_device *dev, unsigned long ioaddr)
dev->base_addr = ioaddr;
-#ifdef CONFIG_PLAT_MAPPI
- outb_p(E8390_NODMA + E8390_PAGE1 + E8390_STOP,
- ioaddr + E8390_CMD); /* 0x61 */
- for (i = 0; i < ETH_ALEN; i++) {
- dev->dev_addr[i] = SA_prom[i]
- = inb_p(ioaddr + EN1_PHYS_SHIFT(i));
- }
-#else
for (i = 0; i < ETH_ALEN; i++) {
dev->dev_addr[i] = SA_prom[i];
}
-#endif
pr_cont("%pM\n", dev->dev_addr);
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index b6cf4b6962f5..074d760a568b 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -34,7 +34,6 @@ source "drivers/net/ethernet/arc/Kconfig"
source "drivers/net/ethernet/atheros/Kconfig"
source "drivers/net/ethernet/aurora/Kconfig"
source "drivers/net/ethernet/cadence/Kconfig"
-source "drivers/net/ethernet/adi/Kconfig"
source "drivers/net/ethernet/broadcom/Kconfig"
source "drivers/net/ethernet/brocade/Kconfig"
source "drivers/net/ethernet/calxeda/Kconfig"
@@ -176,7 +175,6 @@ source "drivers/net/ethernet/stmicro/Kconfig"
source "drivers/net/ethernet/sun/Kconfig"
source "drivers/net/ethernet/tehuti/Kconfig"
source "drivers/net/ethernet/ti/Kconfig"
-source "drivers/net/ethernet/tile/Kconfig"
source "drivers/net/ethernet/toshiba/Kconfig"
source "drivers/net/ethernet/tundra/Kconfig"
source "drivers/net/ethernet/via/Kconfig"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index 3cdf01e96e0b..135dae67d671 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -21,7 +21,6 @@ obj-$(CONFIG_NET_VENDOR_ARC) += arc/
obj-$(CONFIG_NET_VENDOR_ATHEROS) += atheros/
obj-$(CONFIG_NET_VENDOR_AURORA) += aurora/
obj-$(CONFIG_NET_CADENCE) += cadence/
-obj-$(CONFIG_NET_BFIN) += adi/
obj-$(CONFIG_NET_VENDOR_BROADCOM) += broadcom/
obj-$(CONFIG_NET_VENDOR_BROCADE) += brocade/
obj-$(CONFIG_NET_CALXEDA_XGMAC) += calxeda/
@@ -88,7 +87,6 @@ obj-$(CONFIG_NET_VENDOR_STMICRO) += stmicro/
obj-$(CONFIG_NET_VENDOR_SUN) += sun/
obj-$(CONFIG_NET_VENDOR_TEHUTI) += tehuti/
obj-$(CONFIG_NET_VENDOR_TI) += ti/
-obj-$(CONFIG_TILE_NET) += tile/
obj-$(CONFIG_NET_VENDOR_TOSHIBA) += toshiba/
obj-$(CONFIG_NET_VENDOR_TUNDRA) += tundra/
obj-$(CONFIG_NET_VENDOR_VIA) += via/
diff --git a/drivers/net/ethernet/adi/Kconfig b/drivers/net/ethernet/adi/Kconfig
deleted file mode 100644
index 98cc8f535021..000000000000
--- a/drivers/net/ethernet/adi/Kconfig
+++ /dev/null
@@ -1,66 +0,0 @@
-#
-# Blackfin device configuration
-#
-
-config NET_BFIN
- bool "Blackfin devices"
- depends on BF516 || BF518 || BF526 || BF527 || BF536 || BF537
- ---help---
- If you have a network (Ethernet) card belonging to this class, say Y.
-
- If unsure, say Y.
-
- Note that the answer to this question doesn't directly affect the
- kernel: saying N will just cause the configurator to skip all
- the remaining Blackfin card questions. If you say Y, you will be
- asked for your specific card in the following questions.
-
-if NET_BFIN
-
-config BFIN_MAC
- tristate "Blackfin on-chip MAC support"
- depends on (BF516 || BF518 || BF526 || BF527 || BF536 || BF537)
- select CRC32
- select MII
- select PHYLIB
- select BFIN_MAC_USE_L1 if DMA_UNCACHED_NONE
- ---help---
- This is the driver for Blackfin on-chip mac device. Say Y if you want
- it compiled into the kernel. This driver is also available as a
- module ( = code which can be inserted in and removed from the running
- kernel whenever you want). The module will be called bfin_mac.
-
-config BFIN_MAC_USE_L1
- bool "Use L1 memory for rx/tx packets"
- depends on BFIN_MAC && (BF527 || BF537)
- default y
- ---help---
- To get maximum network performance, you should use L1 memory as rx/tx
- buffers. Say N here if you want to reserve L1 memory for other uses.
-
-config BFIN_TX_DESC_NUM
- int "Number of transmit buffer packets"
- depends on BFIN_MAC
- range 6 10 if BFIN_MAC_USE_L1
- range 10 100
- default "10"
- ---help---
- Set the number of buffer packets used in driver.
-
-config BFIN_RX_DESC_NUM
- int "Number of receive buffer packets"
- depends on BFIN_MAC
- range 20 64
- default "20"
- ---help---
- Set the number of buffer packets used in driver.
-
-config BFIN_MAC_USE_HWSTAMP
- bool "Use IEEE 1588 hwstamp"
- depends on BFIN_MAC && BF518
- imply PTP_1588_CLOCK
- default y
- ---help---
- To support the IEEE 1588 Precision Time Protocol (PTP), select y here
-
-endif # NET_BFIN
diff --git a/drivers/net/ethernet/adi/Makefile b/drivers/net/ethernet/adi/Makefile
deleted file mode 100644
index b1fbe195d0e8..000000000000
--- a/drivers/net/ethernet/adi/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
-#
-# Makefile for the Blackfin device drivers.
-#
-
-obj-$(CONFIG_BFIN_MAC) += bfin_mac.o
diff --git a/drivers/net/ethernet/adi/bfin_mac.c b/drivers/net/ethernet/adi/bfin_mac.c
deleted file mode 100644
index 7120f2b9c6ef..000000000000
--- a/drivers/net/ethernet/adi/bfin_mac.c
+++ /dev/null
@@ -1,1881 +0,0 @@
-/*
- * Blackfin On-Chip MAC Driver
- *
- * Copyright 2004-2010 Analog Devices Inc.
- *
- * Enter bugs at http://blackfin.uclinux.org/
- *
- * Licensed under the GPL-2 or later.
- */
-
-#define DRV_VERSION "1.1"
-#define DRV_DESC "Blackfin on-chip Ethernet MAC driver"
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/delay.h>
-#include <linux/timer.h>
-#include <linux/errno.h>
-#include <linux/irq.h>
-#include <linux/io.h>
-#include <linux/ioport.h>
-#include <linux/crc32.h>
-#include <linux/device.h>
-#include <linux/spinlock.h>
-#include <linux/mii.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/ethtool.h>
-#include <linux/skbuff.h>
-#include <linux/platform_device.h>
-
-#include <asm/dma.h>
-#include <linux/dma-mapping.h>
-
-#include <asm/div64.h>
-#include <asm/dpmc.h>
-#include <asm/blackfin.h>
-#include <asm/cacheflush.h>
-#include <asm/portmux.h>
-#include <mach/pll.h>
-
-#include "bfin_mac.h"
-
-MODULE_AUTHOR("Bryan Wu, Luke Yang");
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION(DRV_DESC);
-MODULE_ALIAS("platform:bfin_mac");
-
-#if defined(CONFIG_BFIN_MAC_USE_L1)
-# define bfin_mac_alloc(dma_handle, size, num) l1_data_sram_zalloc(size*num)
-# define bfin_mac_free(dma_handle, ptr, num) l1_data_sram_free(ptr)
-#else
-# define bfin_mac_alloc(dma_handle, size, num) \
- dma_alloc_coherent(NULL, size*num, dma_handle, GFP_KERNEL)
-# define bfin_mac_free(dma_handle, ptr, num) \
- dma_free_coherent(NULL, sizeof(*ptr)*num, ptr, dma_handle)
-#endif
-
-#define PKT_BUF_SZ 1580
-
-#define MAX_TIMEOUT_CNT 500
-
-/* pointers to maintain transmit list */
-static struct net_dma_desc_tx *tx_list_head;
-static struct net_dma_desc_tx *tx_list_tail;
-static struct net_dma_desc_rx *rx_list_head;
-static struct net_dma_desc_rx *rx_list_tail;
-static struct net_dma_desc_rx *current_rx_ptr;
-static struct net_dma_desc_tx *current_tx_ptr;
-static struct net_dma_desc_tx *tx_desc;
-static struct net_dma_desc_rx *rx_desc;
-
-static void desc_list_free(void)
-{
- struct net_dma_desc_rx *r;
- struct net_dma_desc_tx *t;
- int i;
-#if !defined(CONFIG_BFIN_MAC_USE_L1)
- dma_addr_t dma_handle = 0;
-#endif
-
- if (tx_desc) {
- t = tx_list_head;
- for (i = 0; i < CONFIG_BFIN_TX_DESC_NUM; i++) {
- if (t) {
- if (t->skb) {
- dev_kfree_skb(t->skb);
- t->skb = NULL;
- }
- t = t->next;
- }
- }
- bfin_mac_free(dma_handle, tx_desc, CONFIG_BFIN_TX_DESC_NUM);
- }
-
- if (rx_desc) {
- r = rx_list_head;
- for (i = 0; i < CONFIG_BFIN_RX_DESC_NUM; i++) {
- if (r) {
- if (r->skb) {
- dev_kfree_skb(r->skb);
- r->skb = NULL;
- }
- r = r->next;
- }
- }
- bfin_mac_free(dma_handle, rx_desc, CONFIG_BFIN_RX_DESC_NUM);
- }
-}
-
-static int desc_list_init(struct net_device *dev)
-{
- int i;
- struct sk_buff *new_skb;
-#if !defined(CONFIG_BFIN_MAC_USE_L1)
- /*
- * This dma_handle is useless in Blackfin dma_alloc_coherent().
- * The real dma handler is the return value of dma_alloc_coherent().
- */
- dma_addr_t dma_handle;
-#endif
-
- tx_desc = bfin_mac_alloc(&dma_handle,
- sizeof(struct net_dma_desc_tx),
- CONFIG_BFIN_TX_DESC_NUM);
- if (tx_desc == NULL)
- goto init_error;
-
- rx_desc = bfin_mac_alloc(&dma_handle,
- sizeof(struct net_dma_desc_rx),
- CONFIG_BFIN_RX_DESC_NUM);
- if (rx_desc == NULL)
- goto init_error;
-
- /* init tx_list */
- tx_list_head = tx_list_tail = tx_desc;
-
- for (i = 0; i < CONFIG_BFIN_TX_DESC_NUM; i++) {
- struct net_dma_desc_tx *t = tx_desc + i;
- struct dma_descriptor *a = &(t->desc_a);
- struct dma_descriptor *b = &(t->desc_b);
-
- /*
- * disable DMA
- * read from memory WNR = 0
- * wordsize is 32 bits
- * 6 half words is desc size
- * large desc flow
- */
- a->config = WDSIZE_32 | NDSIZE_6 | DMAFLOW_LARGE;
- a->start_addr = (unsigned long)t->packet;
- a->x_count = 0;
- a->next_dma_desc = b;
-
- /*
- * enabled DMA
- * write to memory WNR = 1
- * wordsize is 32 bits
- * disable interrupt
- * 6 half words is desc size
- * large desc flow
- */
- b->config = DMAEN | WNR | WDSIZE_32 | NDSIZE_6 | DMAFLOW_LARGE;
- b->start_addr = (unsigned long)(&(t->status));
- b->x_count = 0;
-
- t->skb = NULL;
- tx_list_tail->desc_b.next_dma_desc = a;
- tx_list_tail->next = t;
- tx_list_tail = t;
- }
- tx_list_tail->next = tx_list_head; /* tx_list is a circle */
- tx_list_tail->desc_b.next_dma_desc = &(tx_list_head->desc_a);
- current_tx_ptr = tx_list_head;
-
- /* init rx_list */
- rx_list_head = rx_list_tail = rx_desc;
-
- for (i = 0; i < CONFIG_BFIN_RX_DESC_NUM; i++) {
- struct net_dma_desc_rx *r = rx_desc + i;
- struct dma_descriptor *a = &(r->desc_a);
- struct dma_descriptor *b = &(r->desc_b);
-
- /* allocate a new skb for next time receive */
- new_skb = netdev_alloc_skb(dev, PKT_BUF_SZ + NET_IP_ALIGN);
- if (!new_skb)
- goto init_error;
-
- skb_reserve(new_skb, NET_IP_ALIGN);
- /* Invalidate the data cache of skb->data range when it is write back
- * cache. It will prevent overwriting the new data from DMA
- */
- blackfin_dcache_invalidate_range((unsigned long)new_skb->head,
- (unsigned long)new_skb->end);
- r->skb = new_skb;
-
- /*
- * enabled DMA
- * write to memory WNR = 1
- * wordsize is 32 bits
- * disable interrupt
- * 6 half words is desc size
- * large desc flow
- */
- a->config = DMAEN | WNR | WDSIZE_32 | NDSIZE_6 | DMAFLOW_LARGE;
- /* since RXDWA is enabled */
- a->start_addr = (unsigned long)new_skb->data - 2;
- a->x_count = 0;
- a->next_dma_desc = b;
-
- /*
- * enabled DMA
- * write to memory WNR = 1
- * wordsize is 32 bits
- * enable interrupt
- * 6 half words is desc size
- * large desc flow
- */
- b->config = DMAEN | WNR | WDSIZE_32 | DI_EN |
- NDSIZE_6 | DMAFLOW_LARGE;
- b->start_addr = (unsigned long)(&(r->status));
- b->x_count = 0;
-
- rx_list_tail->desc_b.next_dma_desc = a;
- rx_list_tail->next = r;
- rx_list_tail = r;
- }
- rx_list_tail->next = rx_list_head; /* rx_list is a circle */
- rx_list_tail->desc_b.next_dma_desc = &(rx_list_head->desc_a);
- current_rx_ptr = rx_list_head;
-
- return 0;
-
-init_error:
- desc_list_free();
- pr_err("kmalloc failed\n");
- return -ENOMEM;
-}
-
-
-/*---PHY CONTROL AND CONFIGURATION-----------------------------------------*/
-
-/*
- * MII operations
- */
-/* Wait until the previous MDC/MDIO transaction has completed */
-static int bfin_mdio_poll(void)
-{
- int timeout_cnt = MAX_TIMEOUT_CNT;
-
- /* poll the STABUSY bit */
- while ((bfin_read_EMAC_STAADD()) & STABUSY) {
- udelay(1);
- if (timeout_cnt-- < 0) {
- pr_err("wait MDC/MDIO transaction to complete timeout\n");
- return -ETIMEDOUT;
- }
- }
-
- return 0;
-}
-
-/* Read an off-chip register in a PHY through the MDC/MDIO port */
-static int bfin_mdiobus_read(struct mii_bus *bus, int phy_addr, int regnum)
-{
- int ret;
-
- ret = bfin_mdio_poll();
- if (ret)
- return ret;
-
- /* read mode */
- bfin_write_EMAC_STAADD(SET_PHYAD((u16) phy_addr) |
- SET_REGAD((u16) regnum) |
- STABUSY);
-
- ret = bfin_mdio_poll();
- if (ret)
- return ret;
-
- return (int) bfin_read_EMAC_STADAT();
-}
-
-/* Write an off-chip register in a PHY through the MDC/MDIO port */
-static int bfin_mdiobus_write(struct mii_bus *bus, int phy_addr, int regnum,
- u16 value)
-{
- int ret;
-
- ret = bfin_mdio_poll();
- if (ret)
- return ret;
-
- bfin_write_EMAC_STADAT((u32) value);
-
- /* write mode */
- bfin_write_EMAC_STAADD(SET_PHYAD((u16) phy_addr) |
- SET_REGAD((u16) regnum) |
- STAOP |
- STABUSY);
-
- return bfin_mdio_poll();
-}
-
-static void bfin_mac_adjust_link(struct net_device *dev)
-{
- struct bfin_mac_local *lp = netdev_priv(dev);
- struct phy_device *phydev = dev->phydev;
- unsigned long flags;
- int new_state = 0;
-
- spin_lock_irqsave(&lp->lock, flags);
- if (phydev->link) {
- /* Now we make sure that we can be in full duplex mode.
- * If not, we operate in half-duplex mode. */
- if (phydev->duplex != lp->old_duplex) {
- u32 opmode = bfin_read_EMAC_OPMODE();
- new_state = 1;
-
- if (phydev->duplex)
- opmode |= FDMODE;
- else
- opmode &= ~(FDMODE);
-
- bfin_write_EMAC_OPMODE(opmode);
- lp->old_duplex = phydev->duplex;
- }
-
- if (phydev->speed != lp->old_speed) {
- if (phydev->interface == PHY_INTERFACE_MODE_RMII) {
- u32 opmode = bfin_read_EMAC_OPMODE();
- switch (phydev->speed) {
- case 10:
- opmode |= RMII_10;
- break;
- case 100:
- opmode &= ~RMII_10;
- break;
- default:
- netdev_warn(dev,
- "Ack! Speed (%d) is not 10/100!\n",
- phydev->speed);
- break;
- }
- bfin_write_EMAC_OPMODE(opmode);
- }
-
- new_state = 1;
- lp->old_speed = phydev->speed;
- }
-
- if (!lp->old_link) {
- new_state = 1;
- lp->old_link = 1;
- }
- } else if (lp->old_link) {
- new_state = 1;
- lp->old_link = 0;
- lp->old_speed = 0;
- lp->old_duplex = -1;
- }
-
- if (new_state) {
- u32 opmode = bfin_read_EMAC_OPMODE();
- phy_print_status(phydev);
- pr_debug("EMAC_OPMODE = 0x%08x\n", opmode);
- }
-
- spin_unlock_irqrestore(&lp->lock, flags);
-}
-
-/* MDC = 2.5 MHz */
-#define MDC_CLK 2500000
-
-static int mii_probe(struct net_device *dev, int phy_mode)
-{
- struct bfin_mac_local *lp = netdev_priv(dev);
- struct phy_device *phydev;
- unsigned short sysctl;
- u32 sclk, mdc_div;
-
- /* Enable PHY output early */
- if (!(bfin_read_VR_CTL() & CLKBUFOE))
- bfin_write_VR_CTL(bfin_read_VR_CTL() | CLKBUFOE);
-
- sclk = get_sclk();
- mdc_div = ((sclk / MDC_CLK) / 2) - 1;
-
- sysctl = bfin_read_EMAC_SYSCTL();
- sysctl = (sysctl & ~MDCDIV) | SET_MDCDIV(mdc_div);
- bfin_write_EMAC_SYSCTL(sysctl);
-
- phydev = phy_find_first(lp->mii_bus);
- if (!phydev) {
- netdev_err(dev, "no phy device found\n");
- return -ENODEV;
- }
-
- if (phy_mode != PHY_INTERFACE_MODE_RMII &&
- phy_mode != PHY_INTERFACE_MODE_MII) {
- netdev_err(dev, "invalid phy interface mode\n");
- return -EINVAL;
- }
-
- phydev = phy_connect(dev, phydev_name(phydev),
- &bfin_mac_adjust_link, phy_mode);
-
- if (IS_ERR(phydev)) {
- netdev_err(dev, "could not attach PHY\n");
- return PTR_ERR(phydev);
- }
-
- /* mask with MAC supported features */
- phydev->supported &= (SUPPORTED_10baseT_Half
- | SUPPORTED_10baseT_Full
- | SUPPORTED_100baseT_Half
- | SUPPORTED_100baseT_Full
- | SUPPORTED_Autoneg
- | SUPPORTED_Pause | SUPPORTED_Asym_Pause
- | SUPPORTED_MII
- | SUPPORTED_TP);
-
- phydev->advertising = phydev->supported;
-
- lp->old_link = 0;
- lp->old_speed = 0;
- lp->old_duplex = -1;
-
- phy_attached_print(phydev, "mdc_clk=%dHz(mdc_div=%d)@sclk=%dMHz)\n",
- MDC_CLK, mdc_div, sclk / 1000000);
-
- return 0;
-}
-
-/*
- * Ethtool support
- */
-
-/*
- * interrupt routine for magic packet wakeup
- */
-static irqreturn_t bfin_mac_wake_interrupt(int irq, void *dev_id)
-{
- return IRQ_HANDLED;
-}
-
-static void bfin_mac_ethtool_getdrvinfo(struct net_device *dev,
- struct ethtool_drvinfo *info)
-{
- strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
- strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
- strlcpy(info->bus_info, dev_name(&dev->dev), sizeof(info->bus_info));
-}
-
-static void bfin_mac_ethtool_getwol(struct net_device *dev,
- struct ethtool_wolinfo *wolinfo)
-{
- struct bfin_mac_local *lp = netdev_priv(dev);
-
- wolinfo->supported = WAKE_MAGIC;
- wolinfo->wolopts = lp->wol;
-}
-
-static int bfin_mac_ethtool_setwol(struct net_device *dev,
- struct ethtool_wolinfo *wolinfo)
-{
- struct bfin_mac_local *lp = netdev_priv(dev);
- int rc;
-
- if (wolinfo->wolopts & (WAKE_MAGICSECURE |
- WAKE_UCAST |
- WAKE_MCAST |
- WAKE_BCAST |
- WAKE_ARP))
- return -EOPNOTSUPP;
-
- lp->wol = wolinfo->wolopts;
-
- if (lp->wol && !lp->irq_wake_requested) {
- /* register wake irq handler */
- rc = request_irq(IRQ_MAC_WAKEDET, bfin_mac_wake_interrupt,
- 0, "EMAC_WAKE", dev);
- if (rc)
- return rc;
- lp->irq_wake_requested = true;
- }
-
- if (!lp->wol && lp->irq_wake_requested) {
- free_irq(IRQ_MAC_WAKEDET, dev);
- lp->irq_wake_requested = false;
- }
-
- /* Make sure the PHY driver doesn't suspend */
- device_init_wakeup(&dev->dev, lp->wol);
-
- return 0;
-}
-
-#ifdef CONFIG_BFIN_MAC_USE_HWSTAMP
-static int bfin_mac_ethtool_get_ts_info(struct net_device *dev,
- struct ethtool_ts_info *info)
-{
- struct bfin_mac_local *lp = netdev_priv(dev);
-
- info->so_timestamping =
- SOF_TIMESTAMPING_TX_HARDWARE |
- SOF_TIMESTAMPING_RX_HARDWARE |
- SOF_TIMESTAMPING_RAW_HARDWARE;
- info->phc_index = lp->phc_index;
- info->tx_types =
- (1 << HWTSTAMP_TX_OFF) |
- (1 << HWTSTAMP_TX_ON);
- info->rx_filters =
- (1 << HWTSTAMP_FILTER_NONE) |
- (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
- return 0;
-}
-#endif
-
-static const struct ethtool_ops bfin_mac_ethtool_ops = {
- .get_link = ethtool_op_get_link,
- .get_drvinfo = bfin_mac_ethtool_getdrvinfo,
- .get_wol = bfin_mac_ethtool_getwol,
- .set_wol = bfin_mac_ethtool_setwol,
-#ifdef CONFIG_BFIN_MAC_USE_HWSTAMP
- .get_ts_info = bfin_mac_ethtool_get_ts_info,
-#endif
- .get_link_ksettings = phy_ethtool_get_link_ksettings,
- .set_link_ksettings = phy_ethtool_set_link_ksettings,
-};
-
-/**************************************************************************/
-static void setup_system_regs(struct net_device *dev)
-{
- struct bfin_mac_local *lp = netdev_priv(dev);
- int i;
- unsigned short sysctl;
-
- /*
- * Odd word alignment for Receive Frame DMA word
- * Configure checksum support and receive frame word alignment
- */
- sysctl = bfin_read_EMAC_SYSCTL();
- /*
- * check if interrupt is requested for any PHY,
- * enable PHY interrupt only if needed
- */
- for (i = 0; i < PHY_MAX_ADDR; ++i)
- if (lp->mii_bus->irq[i] != PHY_POLL)
- break;
- if (i < PHY_MAX_ADDR)
- sysctl |= PHYIE;
- sysctl |= RXDWA;
-#if defined(BFIN_MAC_CSUM_OFFLOAD)
- sysctl |= RXCKS;
-#else
- sysctl &= ~RXCKS;
-#endif
- bfin_write_EMAC_SYSCTL(sysctl);
-
- bfin_write_EMAC_MMC_CTL(RSTC | CROLL);
-
- /* Set vlan regs to let 1522 bytes long packets pass through */
- bfin_write_EMAC_VLAN1(lp->vlan1_mask);
- bfin_write_EMAC_VLAN2(lp->vlan2_mask);
-
- /* Initialize the TX DMA channel registers */
- bfin_write_DMA2_X_COUNT(0);
- bfin_write_DMA2_X_MODIFY(4);
- bfin_write_DMA2_Y_COUNT(0);
- bfin_write_DMA2_Y_MODIFY(0);
-
- /* Initialize the RX DMA channel registers */
- bfin_write_DMA1_X_COUNT(0);
- bfin_write_DMA1_X_MODIFY(4);
- bfin_write_DMA1_Y_COUNT(0);
- bfin_write_DMA1_Y_MODIFY(0);
-}
-
-static void setup_mac_addr(u8 *mac_addr)
-{
- u32 addr_low = le32_to_cpu(*(__le32 *) & mac_addr[0]);
- u16 addr_hi = le16_to_cpu(*(__le16 *) & mac_addr[4]);
-
- /* this depends on a little-endian machine */
- bfin_write_EMAC_ADDRLO(addr_low);
- bfin_write_EMAC_ADDRHI(addr_hi);
-}
-
-static int bfin_mac_set_mac_address(struct net_device *dev, void *p)
-{
- struct sockaddr *addr = p;
- if (netif_running(dev))
- return -EBUSY;
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
- setup_mac_addr(dev->dev_addr);
- return 0;
-}
-
-#ifdef CONFIG_BFIN_MAC_USE_HWSTAMP
-#define bfin_mac_hwtstamp_is_none(cfg) ((cfg) == HWTSTAMP_FILTER_NONE)
-
-static u32 bfin_select_phc_clock(u32 input_clk, unsigned int *shift_result)
-{
- u32 ipn = 1000000000UL / input_clk;
- u32 ppn = 1;
- unsigned int shift = 0;
-
- while (ppn <= ipn) {
- ppn <<= 1;
- shift++;
- }
- *shift_result = shift;
- return 1000000000UL / ppn;
-}
-
-static int bfin_mac_hwtstamp_set(struct net_device *netdev,
- struct ifreq *ifr)
-{
- struct hwtstamp_config config;
- struct bfin_mac_local *lp = netdev_priv(netdev);
- u16 ptpctl;
- u32 ptpfv1, ptpfv2, ptpfv3, ptpfoff;
-
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
-
- pr_debug("%s config flag:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
- __func__, config.flags, config.tx_type, config.rx_filter);
-
- /* reserved for future extensions */
- if (config.flags)
- return -EINVAL;
-
- if ((config.tx_type != HWTSTAMP_TX_OFF) &&
- (config.tx_type != HWTSTAMP_TX_ON))
- return -ERANGE;
-
- ptpctl = bfin_read_EMAC_PTP_CTL();
-
- switch (config.rx_filter) {
- case HWTSTAMP_FILTER_NONE:
- /*
- * Don't allow any timestamping
- */
- ptpfv3 = 0xFFFFFFFF;
- bfin_write_EMAC_PTP_FV3(ptpfv3);
- break;
- case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
- case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
- case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
- /*
- * Clear the five comparison mask bits (bits[12:8]) in EMAC_PTP_CTL
- * to enable all the field matches.
- */
- ptpctl &= ~0x1F00;
- bfin_write_EMAC_PTP_CTL(ptpctl);
- /*
- * Keep the default values of the EMAC_PTP_FOFF register.
- */
- ptpfoff = 0x4A24170C;
- bfin_write_EMAC_PTP_FOFF(ptpfoff);
- /*
- * Keep the default values of the EMAC_PTP_FV1 and EMAC_PTP_FV2
- * registers.
- */
- ptpfv1 = 0x11040800;
- bfin_write_EMAC_PTP_FV1(ptpfv1);
- ptpfv2 = 0x0140013F;
- bfin_write_EMAC_PTP_FV2(ptpfv2);
- /*
- * The default value (0xFFFC) allows the timestamping of both
- * received Sync messages and Delay_Req messages.
- */
- ptpfv3 = 0xFFFFFFFC;
- bfin_write_EMAC_PTP_FV3(ptpfv3);
-
- config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
- break;
- case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
- case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
- /* Clear all five comparison mask bits (bits[12:8]) in the
- * EMAC_PTP_CTL register to enable all the field matches.
- */
- ptpctl &= ~0x1F00;
- bfin_write_EMAC_PTP_CTL(ptpctl);
- /*
- * Keep the default values of the EMAC_PTP_FOFF register, except set
- * the PTPCOF field to 0x2A.
- */
- ptpfoff = 0x2A24170C;
- bfin_write_EMAC_PTP_FOFF(ptpfoff);
- /*
- * Keep the default values of the EMAC_PTP_FV1 and EMAC_PTP_FV2
- * registers.
- */
- ptpfv1 = 0x11040800;
- bfin_write_EMAC_PTP_FV1(ptpfv1);
- ptpfv2 = 0x0140013F;
- bfin_write_EMAC_PTP_FV2(ptpfv2);
- /*
- * To allow the timestamping of Pdelay_Req and Pdelay_Resp, set
- * the value to 0xFFF0.
- */
- ptpfv3 = 0xFFFFFFF0;
- bfin_write_EMAC_PTP_FV3(ptpfv3);
-
- config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
- break;
- case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
- case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
- /*
- * Clear bits 8 and 12 of the EMAC_PTP_CTL register to enable only the
- * EFTM and PTPCM field comparison.
- */
- ptpctl &= ~0x1100;
- bfin_write_EMAC_PTP_CTL(ptpctl);
- /*
- * Keep the default values of all the fields of the EMAC_PTP_FOFF
- * register, except set the PTPCOF field to 0x0E.
- */
- ptpfoff = 0x0E24170C;
- bfin_write_EMAC_PTP_FOFF(ptpfoff);
- /*
- * Program bits [15:0] of the EMAC_PTP_FV1 register to 0x88F7, which
- * corresponds to PTP messages on the MAC layer.
- */
- ptpfv1 = 0x110488F7;
- bfin_write_EMAC_PTP_FV1(ptpfv1);
- ptpfv2 = 0x0140013F;
- bfin_write_EMAC_PTP_FV2(ptpfv2);
- /*
- * To allow the timestamping of Pdelay_Req and Pdelay_Resp
- * messages, set the value to 0xFFF0.
- */
- ptpfv3 = 0xFFFFFFF0;
- bfin_write_EMAC_PTP_FV3(ptpfv3);
-
- config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
- break;
- default:
- return -ERANGE;
- }
-
- if (config.tx_type == HWTSTAMP_TX_OFF &&
- bfin_mac_hwtstamp_is_none(config.rx_filter)) {
- ptpctl &= ~PTP_EN;
- bfin_write_EMAC_PTP_CTL(ptpctl);
-
- SSYNC();
- } else {
- ptpctl |= PTP_EN;
- bfin_write_EMAC_PTP_CTL(ptpctl);
-
- /*
- * clear any existing timestamp
- */
- bfin_read_EMAC_PTP_RXSNAPLO();
- bfin_read_EMAC_PTP_RXSNAPHI();
-
- bfin_read_EMAC_PTP_TXSNAPLO();
- bfin_read_EMAC_PTP_TXSNAPHI();
-
- SSYNC();
- }
-
- lp->stamp_cfg = config;
- return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
- -EFAULT : 0;
-}
-
-static int bfin_mac_hwtstamp_get(struct net_device *netdev,
- struct ifreq *ifr)
-{
- struct bfin_mac_local *lp = netdev_priv(netdev);
-
- return copy_to_user(ifr->ifr_data, &lp->stamp_cfg,
- sizeof(lp->stamp_cfg)) ?
- -EFAULT : 0;
-}
-
-static void bfin_tx_hwtstamp(struct net_device *netdev, struct sk_buff *skb)
-{
- struct bfin_mac_local *lp = netdev_priv(netdev);
-
- if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
- int timeout_cnt = MAX_TIMEOUT_CNT;
-
- /* When doing time stamping, keep the connection to the socket
- * a while longer
- */
- skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
-
- /*
- * The timestamping is done at the EMAC module's MII/RMII interface
- * when the module sees the Start of Frame of an event message packet. This
- * interface is the closest possible place to the physical Ethernet transmission
- * medium, providing the best timing accuracy.
- */
- while ((!(bfin_read_EMAC_PTP_ISTAT() & TXTL)) && (--timeout_cnt))
- udelay(1);
- if (timeout_cnt == 0)
- netdev_err(netdev, "timestamp the TX packet failed\n");
- else {
- struct skb_shared_hwtstamps shhwtstamps;
- u64 ns;
- u64 regval;
-
- regval = bfin_read_EMAC_PTP_TXSNAPLO();
- regval |= (u64)bfin_read_EMAC_PTP_TXSNAPHI() << 32;
- memset(&shhwtstamps, 0, sizeof(shhwtstamps));
- ns = regval << lp->shift;
- shhwtstamps.hwtstamp = ns_to_ktime(ns);
- skb_tstamp_tx(skb, &shhwtstamps);
- }
- }
-}
-
-static void bfin_rx_hwtstamp(struct net_device *netdev, struct sk_buff *skb)
-{
- struct bfin_mac_local *lp = netdev_priv(netdev);
- u32 valid;
- u64 regval, ns;
- struct skb_shared_hwtstamps *shhwtstamps;
-
- if (bfin_mac_hwtstamp_is_none(lp->stamp_cfg.rx_filter))
- return;
-
- valid = bfin_read_EMAC_PTP_ISTAT() & RXEL;
- if (!valid)
- return;
-
- shhwtstamps = skb_hwtstamps(skb);
-
- regval = bfin_read_EMAC_PTP_RXSNAPLO();
- regval |= (u64)bfin_read_EMAC_PTP_RXSNAPHI() << 32;
- ns = regval << lp->shift;
- memset(shhwtstamps, 0, sizeof(*shhwtstamps));
- shhwtstamps->hwtstamp = ns_to_ktime(ns);
-}
-
-static void bfin_mac_hwtstamp_init(struct net_device *netdev)
-{
- struct bfin_mac_local *lp = netdev_priv(netdev);
- u64 addend, ppb;
- u32 input_clk, phc_clk;
-
- /* Initialize hardware timer */
- input_clk = get_sclk();
- phc_clk = bfin_select_phc_clock(input_clk, &lp->shift);
- addend = phc_clk * (1ULL << 32);
- do_div(addend, input_clk);
- bfin_write_EMAC_PTP_ADDEND((u32)addend);
-
- lp->addend = addend;
- ppb = 1000000000ULL * input_clk;
- do_div(ppb, phc_clk);
- lp->max_ppb = ppb - 1000000000ULL - 1ULL;
-
- /* Initialize hwstamp config */
- lp->stamp_cfg.rx_filter = HWTSTAMP_FILTER_NONE;
- lp->stamp_cfg.tx_type = HWTSTAMP_TX_OFF;
-}
-
-static u64 bfin_ptp_time_read(struct bfin_mac_local *lp)
-{
- u64 ns;
- u32 lo, hi;
-
- lo = bfin_read_EMAC_PTP_TIMELO();
- hi = bfin_read_EMAC_PTP_TIMEHI();
-
- ns = ((u64) hi) << 32;
- ns |= lo;
- ns <<= lp->shift;
-
- return ns;
-}
-
-static void bfin_ptp_time_write(struct bfin_mac_local *lp, u64 ns)
-{
- u32 hi, lo;
-
- ns >>= lp->shift;
- hi = ns >> 32;
- lo = ns & 0xffffffff;
-
- bfin_write_EMAC_PTP_TIMELO(lo);
- bfin_write_EMAC_PTP_TIMEHI(hi);
-}
-
-/* PTP Hardware Clock operations */
-
-static int bfin_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
-{
- u64 adj;
- u32 diff, addend;
- int neg_adj = 0;
- struct bfin_mac_local *lp =
- container_of(ptp, struct bfin_mac_local, caps);
-
- if (ppb < 0) {
- neg_adj = 1;
- ppb = -ppb;
- }
- addend = lp->addend;
- adj = addend;
- adj *= ppb;
- diff = div_u64(adj, 1000000000ULL);
-
- addend = neg_adj ? addend - diff : addend + diff;
-
- bfin_write_EMAC_PTP_ADDEND(addend);
-
- return 0;
-}
-
-static int bfin_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
-{
- s64 now;
- unsigned long flags;
- struct bfin_mac_local *lp =
- container_of(ptp, struct bfin_mac_local, caps);
-
- spin_lock_irqsave(&lp->phc_lock, flags);
-
- now = bfin_ptp_time_read(lp);
- now += delta;
- bfin_ptp_time_write(lp, now);
-
- spin_unlock_irqrestore(&lp->phc_lock, flags);
-
- return 0;
-}
-
-static int bfin_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
-{
- u64 ns;
- unsigned long flags;
- struct bfin_mac_local *lp =
- container_of(ptp, struct bfin_mac_local, caps);
-
- spin_lock_irqsave(&lp->phc_lock, flags);
-
- ns = bfin_ptp_time_read(lp);
-
- spin_unlock_irqrestore(&lp->phc_lock, flags);
-
- *ts = ns_to_timespec64(ns);
-
- return 0;
-}
-
-static int bfin_ptp_settime(struct ptp_clock_info *ptp,
- const struct timespec64 *ts)
-{
- u64 ns;
- unsigned long flags;
- struct bfin_mac_local *lp =
- container_of(ptp, struct bfin_mac_local, caps);
-
- ns = timespec64_to_ns(ts);
-
- spin_lock_irqsave(&lp->phc_lock, flags);
-
- bfin_ptp_time_write(lp, ns);
-
- spin_unlock_irqrestore(&lp->phc_lock, flags);
-
- return 0;
-}
-
-static int bfin_ptp_enable(struct ptp_clock_info *ptp,
- struct ptp_clock_request *rq, int on)
-{
- return -EOPNOTSUPP;
-}
-
-static const struct ptp_clock_info bfin_ptp_caps = {
- .owner = THIS_MODULE,
- .name = "BF518 clock",
- .max_adj = 0,
- .n_alarm = 0,
- .n_ext_ts = 0,
- .n_per_out = 0,
- .n_pins = 0,
- .pps = 0,
- .adjfreq = bfin_ptp_adjfreq,
- .adjtime = bfin_ptp_adjtime,
- .gettime64 = bfin_ptp_gettime,
- .settime64 = bfin_ptp_settime,
- .enable = bfin_ptp_enable,
-};
-
-static int bfin_phc_init(struct net_device *netdev, struct device *dev)
-{
- struct bfin_mac_local *lp = netdev_priv(netdev);
-
- lp->caps = bfin_ptp_caps;
- lp->caps.max_adj = lp->max_ppb;
- lp->clock = ptp_clock_register(&lp->caps, dev);
- if (IS_ERR(lp->clock))
- return PTR_ERR(lp->clock);
-
- lp->phc_index = ptp_clock_index(lp->clock);
- spin_lock_init(&lp->phc_lock);
-
- return 0;
-}
-
-static void bfin_phc_release(struct bfin_mac_local *lp)
-{
- ptp_clock_unregister(lp->clock);
-}
-
-#else
-# define bfin_mac_hwtstamp_is_none(cfg) 0
-# define bfin_mac_hwtstamp_init(dev)
-# define bfin_mac_hwtstamp_set(dev, ifr) (-EOPNOTSUPP)
-# define bfin_mac_hwtstamp_get(dev, ifr) (-EOPNOTSUPP)
-# define bfin_rx_hwtstamp(dev, skb)
-# define bfin_tx_hwtstamp(dev, skb)
-# define bfin_phc_init(netdev, dev) 0
-# define bfin_phc_release(lp)
-#endif
-
-static inline void _tx_reclaim_skb(void)
-{
- do {
- tx_list_head->desc_a.config &= ~DMAEN;
- tx_list_head->status.status_word = 0;
- if (tx_list_head->skb) {
- dev_consume_skb_any(tx_list_head->skb);
- tx_list_head->skb = NULL;
- }
- tx_list_head = tx_list_head->next;
-
- } while (tx_list_head->status.status_word != 0);
-}
-
-static void tx_reclaim_skb(struct bfin_mac_local *lp)
-{
- int timeout_cnt = MAX_TIMEOUT_CNT;
-
- if (tx_list_head->status.status_word != 0)
- _tx_reclaim_skb();
-
- if (current_tx_ptr->next == tx_list_head) {
- while (tx_list_head->status.status_word == 0) {
- /* slow down polling to avoid too many queue stops. */
- udelay(10);
- /* reclaim skb if DMA is not running. */
- if (!(bfin_read_DMA2_IRQ_STATUS() & DMA_RUN))
- break;
- if (timeout_cnt-- < 0)
- break;
- }
-
- if (timeout_cnt >= 0)
- _tx_reclaim_skb();
- else
- netif_stop_queue(lp->ndev);
- }
-
- if (current_tx_ptr->next != tx_list_head &&
- netif_queue_stopped(lp->ndev))
- netif_wake_queue(lp->ndev);
-
- if (tx_list_head != current_tx_ptr) {
- /* shorten the timer interval if tx queue is stopped */
- if (netif_queue_stopped(lp->ndev))
- lp->tx_reclaim_timer.expires =
- jiffies + (TX_RECLAIM_JIFFIES >> 4);
- else
- lp->tx_reclaim_timer.expires =
- jiffies + TX_RECLAIM_JIFFIES;
-
- mod_timer(&lp->tx_reclaim_timer,
- lp->tx_reclaim_timer.expires);
- }
-
- return;
-}
-
-static void tx_reclaim_skb_timeout(struct timer_list *t)
-{
- struct bfin_mac_local *lp = from_timer(lp, t, tx_reclaim_timer);
-
- tx_reclaim_skb(lp);
-}
-
-static int bfin_mac_hard_start_xmit(struct sk_buff *skb,
- struct net_device *dev)
-{
- struct bfin_mac_local *lp = netdev_priv(dev);
- u16 *data;
- u32 data_align = (unsigned long)(skb->data) & 0x3;
-
- current_tx_ptr->skb = skb;
-
- if (data_align == 0x2) {
- /* move skb->data to current_tx_ptr payload */
- data = (u16 *)(skb->data) - 1;
- *data = (u16)(skb->len);
- /*
- * When transmitting an Ethernet packet, the PTP_TSYNC module requires
- * a DMA_Length_Word field associated with the packet. The lower 12 bits
- * of this field are the length of the packet payload in bytes and the higher
- * 4 bits are the timestamping enable field.
- */
- if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
- *data |= 0x1000;
-
- current_tx_ptr->desc_a.start_addr = (u32)data;
- /* this is important! */
- blackfin_dcache_flush_range((u32)data,
- (u32)((u8 *)data + skb->len + 4));
- } else {
- *((u16 *)(current_tx_ptr->packet)) = (u16)(skb->len);
- /* enable timestamping for the sent packet */
- if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
- *((u16 *)(current_tx_ptr->packet)) |= 0x1000;
- memcpy((u8 *)(current_tx_ptr->packet + 2), skb->data,
- skb->len);
- current_tx_ptr->desc_a.start_addr =
- (u32)current_tx_ptr->packet;
- blackfin_dcache_flush_range(
- (u32)current_tx_ptr->packet,
- (u32)(current_tx_ptr->packet + skb->len + 2));
- }
-
- /* make sure the internal data buffers in the core are drained
- * so that the DMA descriptors are completely written when the
- * DMA engine goes to fetch them below
- */
- SSYNC();
-
- /* always clear status buffer before start tx dma */
- current_tx_ptr->status.status_word = 0;
-
- /* enable this packet's dma */
- current_tx_ptr->desc_a.config |= DMAEN;
-
- /* tx dma is running, just return */
- if (bfin_read_DMA2_IRQ_STATUS() & DMA_RUN)
- goto out;
-
- /* tx dma is not running */
- bfin_write_DMA2_NEXT_DESC_PTR(&(current_tx_ptr->desc_a));
- /* dma enabled, read from memory, size is 6 */
- bfin_write_DMA2_CONFIG(current_tx_ptr->desc_a.config);
- /* Turn on the EMAC tx */
- bfin_write_EMAC_OPMODE(bfin_read_EMAC_OPMODE() | TE);
-
-out:
- bfin_tx_hwtstamp(dev, skb);
-
- current_tx_ptr = current_tx_ptr->next;
- dev->stats.tx_packets++;
- dev->stats.tx_bytes += (skb->len);
-
- tx_reclaim_skb(lp);
-
- return NETDEV_TX_OK;
-}
-
-#define IP_HEADER_OFF 0
-#define RX_ERROR_MASK (RX_LONG | RX_ALIGN | RX_CRC | RX_LEN | \
- RX_FRAG | RX_ADDR | RX_DMAO | RX_PHY | RX_LATE | RX_RANGE)
-
-static void bfin_mac_rx(struct bfin_mac_local *lp)
-{
- struct net_device *dev = lp->ndev;
- struct sk_buff *skb, *new_skb;
- unsigned short len;
-#if defined(BFIN_MAC_CSUM_OFFLOAD)
- unsigned int i;
- unsigned char fcs[ETH_FCS_LEN + 1];
-#endif
-
- /* check if frame status word reports an error condition
- * in which case we simply drop the packet
- */
- if (current_rx_ptr->status.status_word & RX_ERROR_MASK) {
- netdev_notice(dev, "rx: receive error - packet dropped\n");
- dev->stats.rx_dropped++;
- goto out;
- }
-
- /* allocate a new skb for next time receive */
- skb = current_rx_ptr->skb;
-
- new_skb = netdev_alloc_skb(dev, PKT_BUF_SZ + NET_IP_ALIGN);
- if (!new_skb) {
- dev->stats.rx_dropped++;
- goto out;
- }
- /* reserve 2 bytes for RXDWA padding */
- skb_reserve(new_skb, NET_IP_ALIGN);
- /* Invalidate the data cache of skb->data range when it is write back
- * cache. It will prevent overwriting the new data from DMA
- */
- blackfin_dcache_invalidate_range((unsigned long)new_skb->head,
- (unsigned long)new_skb->end);
-
- current_rx_ptr->skb = new_skb;
- current_rx_ptr->desc_a.start_addr = (unsigned long)new_skb->data - 2;
-
- len = (unsigned short)(current_rx_ptr->status.status_word & RX_FRLEN);
- /* Deduct the Ethernet FCS length from the Ethernet payload length */
- len -= ETH_FCS_LEN;
- skb_put(skb, len);
-
- skb->protocol = eth_type_trans(skb, dev);
-
- bfin_rx_hwtstamp(dev, skb);
-
-#if defined(BFIN_MAC_CSUM_OFFLOAD)
- /* Checksum offloading only works for IPv4 packets with the standard IP header
- * length of 20 bytes, because the blackfin MAC checksum calculation is
- * based on that assumption. We must NOT use the calculated checksum if our
- * IP version or header break that assumption.
- */
- if (skb->data[IP_HEADER_OFF] == 0x45) {
- skb->csum = current_rx_ptr->status.ip_payload_csum;
- /*
- * Deduct the Ethernet FCS from the hardware-generated IP payload checksum.
- * The IP checksum uses the 16-bit one's complement algorithm, so
- * subtracting a value from the checksum is equivalent to adding its
- * inversion. If the IP payload length is odd, the inverted FCS must also
- * start at an odd address, leaving the first byte zero.
- */
- if (skb->len % 2) {
- fcs[0] = 0;
- for (i = 0; i < ETH_FCS_LEN; i++)
- fcs[i + 1] = ~skb->data[skb->len + i];
- skb->csum = csum_partial(fcs, ETH_FCS_LEN + 1, skb->csum);
- } else {
- for (i = 0; i < ETH_FCS_LEN; i++)
- fcs[i] = ~skb->data[skb->len + i];
- skb->csum = csum_partial(fcs, ETH_FCS_LEN, skb->csum);
- }
- skb->ip_summed = CHECKSUM_COMPLETE;
- }
-#endif
-
- napi_gro_receive(&lp->napi, skb);
-
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += len;
-out:
- current_rx_ptr->status.status_word = 0x00000000;
- current_rx_ptr = current_rx_ptr->next;
-}
-
-static int bfin_mac_poll(struct napi_struct *napi, int budget)
-{
- int i = 0;
- struct bfin_mac_local *lp = container_of(napi,
- struct bfin_mac_local,
- napi);
-
- while (current_rx_ptr->status.status_word != 0 && i < budget) {
- bfin_mac_rx(lp);
- i++;
- }
-
- if (i < budget) {
- napi_complete_done(napi, i);
- if (test_and_clear_bit(BFIN_MAC_RX_IRQ_DISABLED, &lp->flags))
- enable_irq(IRQ_MAC_RX);
- }
-
- return i;
-}
-
-/* interrupt routine to handle rx and error signal */
-static irqreturn_t bfin_mac_interrupt(int irq, void *dev_id)
-{
- struct bfin_mac_local *lp = netdev_priv(dev_id);
- u32 status;
-
- status = bfin_read_DMA1_IRQ_STATUS();
-
- bfin_write_DMA1_IRQ_STATUS(status | DMA_DONE | DMA_ERR);
- if (status & DMA_DONE) {
- disable_irq_nosync(IRQ_MAC_RX);
- set_bit(BFIN_MAC_RX_IRQ_DISABLED, &lp->flags);
- napi_schedule(&lp->napi);
- }
-
- return IRQ_HANDLED;
-}
-
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void bfin_mac_poll_controller(struct net_device *dev)
-{
- struct bfin_mac_local *lp = netdev_priv(dev);
-
- bfin_mac_interrupt(IRQ_MAC_RX, dev);
- tx_reclaim_skb(lp);
-}
-#endif /* CONFIG_NET_POLL_CONTROLLER */
-
-static void bfin_mac_disable(void)
-{
- unsigned int opmode;
-
- opmode = bfin_read_EMAC_OPMODE();
- opmode &= (~RE);
- opmode &= (~TE);
- /* Turn off the EMAC */
- bfin_write_EMAC_OPMODE(opmode);
-}
-
-/*
- * Enable Interrupts, Receive, and Transmit
- */
-static int bfin_mac_enable(struct phy_device *phydev)
-{
- int ret;
- u32 opmode;
-
- pr_debug("%s\n", __func__);
-
- /* Set RX DMA */
- bfin_write_DMA1_NEXT_DESC_PTR(&(rx_list_head->desc_a));
- bfin_write_DMA1_CONFIG(rx_list_head->desc_a.config);
-
- /* Wait MII done */
- ret = bfin_mdio_poll();
- if (ret)
- return ret;
-
- /* We enable only RX here */
- /* ASTP : Enable Automatic Pad Stripping
- PR : Promiscuous Mode for test
- PSF : Receive frames with total length less than 64 bytes.
- FDMODE : Full Duplex Mode
- LB : Internal Loopback for test
- RE : Receiver Enable */
- opmode = bfin_read_EMAC_OPMODE();
- if (opmode & FDMODE)
- opmode |= PSF;
- else
- opmode |= DRO | DC | PSF;
- opmode |= RE;
-
- if (phydev->interface == PHY_INTERFACE_MODE_RMII) {
- opmode |= RMII; /* For Now only 100MBit are supported */
-#if defined(CONFIG_BF537) || defined(CONFIG_BF536)
- if (__SILICON_REVISION__ < 3) {
- /*
- * This isn't publicly documented (fun times!), but in
- * silicon <=0.2, the RX and TX pins are clocked together.
- * So in order to recv, we must enable the transmit side
- * as well. This will cause a spurious TX interrupt too,
- * but we can easily consume that.
- */
- opmode |= TE;
- }
-#endif
- }
-
- /* Turn on the EMAC rx */
- bfin_write_EMAC_OPMODE(opmode);
-
- return 0;
-}
-
-/* Our watchdog timed out. Called by the networking layer */
-static void bfin_mac_timeout(struct net_device *dev)
-{
- struct bfin_mac_local *lp = netdev_priv(dev);
-
- pr_debug("%s: %s\n", dev->name, __func__);
-
- bfin_mac_disable();
-
- del_timer(&lp->tx_reclaim_timer);
-
- /* reset tx queue and free skb */
- while (tx_list_head != current_tx_ptr) {
- tx_list_head->desc_a.config &= ~DMAEN;
- tx_list_head->status.status_word = 0;
- if (tx_list_head->skb) {
- dev_kfree_skb(tx_list_head->skb);
- tx_list_head->skb = NULL;
- }
- tx_list_head = tx_list_head->next;
- }
-
- if (netif_queue_stopped(dev))
- netif_wake_queue(dev);
-
- bfin_mac_enable(dev->phydev);
-
- /* We can accept TX packets again */
- netif_trans_update(dev); /* prevent tx timeout */
-}
-
-static void bfin_mac_multicast_hash(struct net_device *dev)
-{
- u32 emac_hashhi, emac_hashlo;
- struct netdev_hw_addr *ha;
- u32 crc;
-
- emac_hashhi = emac_hashlo = 0;
-
- netdev_for_each_mc_addr(ha, dev) {
- crc = ether_crc(ETH_ALEN, ha->addr);
- crc >>= 26;
-
- if (crc & 0x20)
- emac_hashhi |= 1 << (crc & 0x1f);
- else
- emac_hashlo |= 1 << (crc & 0x1f);
- }
-
- bfin_write_EMAC_HASHHI(emac_hashhi);
- bfin_write_EMAC_HASHLO(emac_hashlo);
-}
-
-/*
- * This routine will, depending on the values passed to it,
- * either make it accept multicast packets, go into
- * promiscuous mode (for TCPDUMP and cousins) or accept
- * a select set of multicast packets
- */
-static void bfin_mac_set_multicast_list(struct net_device *dev)
-{
- u32 sysctl;
-
- if (dev->flags & IFF_PROMISC) {
- netdev_info(dev, "set promisc mode\n");
- sysctl = bfin_read_EMAC_OPMODE();
- sysctl |= PR;
- bfin_write_EMAC_OPMODE(sysctl);
- } else if (dev->flags & IFF_ALLMULTI) {
- /* accept all multicast */
- sysctl = bfin_read_EMAC_OPMODE();
- sysctl |= PAM;
- bfin_write_EMAC_OPMODE(sysctl);
- } else if (!netdev_mc_empty(dev)) {
- /* set up multicast hash table */
- sysctl = bfin_read_EMAC_OPMODE();
- sysctl |= HM;
- bfin_write_EMAC_OPMODE(sysctl);
- bfin_mac_multicast_hash(dev);
- } else {
- /* clear promisc or multicast mode */
- sysctl = bfin_read_EMAC_OPMODE();
- sysctl &= ~(RAF | PAM);
- bfin_write_EMAC_OPMODE(sysctl);
- }
-}
-
-static int bfin_mac_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
-{
- if (!netif_running(netdev))
- return -EINVAL;
-
- switch (cmd) {
- case SIOCSHWTSTAMP:
- return bfin_mac_hwtstamp_set(netdev, ifr);
- case SIOCGHWTSTAMP:
- return bfin_mac_hwtstamp_get(netdev, ifr);
- default:
- if (netdev->phydev)
- return phy_mii_ioctl(netdev->phydev, ifr, cmd);
- else
- return -EOPNOTSUPP;
- }
-}
-
-/*
- * this puts the device in an inactive state
- */
-static void bfin_mac_shutdown(struct net_device *dev)
-{
- /* Turn off the EMAC */
- bfin_write_EMAC_OPMODE(0x00000000);
- /* Turn off the EMAC RX DMA */
- bfin_write_DMA1_CONFIG(0x0000);
- bfin_write_DMA2_CONFIG(0x0000);
-}
-
-/*
- * Open and Initialize the interface
- *
- * Set up everything, reset the card, etc..
- */
-static int bfin_mac_open(struct net_device *dev)
-{
- struct bfin_mac_local *lp = netdev_priv(dev);
- int ret;
- pr_debug("%s: %s\n", dev->name, __func__);
-
- /*
- * Check that the address is valid. If it's not, refuse
- * to bring the device up. The user must specify an
- * address using ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx
- */
- if (!is_valid_ether_addr(dev->dev_addr)) {
- netdev_warn(dev, "no valid ethernet hw addr\n");
- return -EINVAL;
- }
-
- /* initial rx and tx list */
- ret = desc_list_init(dev);
- if (ret)
- return ret;
-
- phy_start(dev->phydev);
- setup_system_regs(dev);
- setup_mac_addr(dev->dev_addr);
-
- bfin_mac_disable();
- ret = bfin_mac_enable(dev->phydev);
- if (ret)
- return ret;
- pr_debug("hardware init finished\n");
-
- napi_enable(&lp->napi);
- netif_start_queue(dev);
- netif_carrier_on(dev);
-
- return 0;
-}
-
-/*
- * this makes the board clean up everything that it can
- * and not talk to the outside world. Caused by
- * an 'ifconfig ethX down'
- */
-static int bfin_mac_close(struct net_device *dev)
-{
- struct bfin_mac_local *lp = netdev_priv(dev);
- pr_debug("%s: %s\n", dev->name, __func__);
-
- netif_stop_queue(dev);
- napi_disable(&lp->napi);
- netif_carrier_off(dev);
-
- phy_stop(dev->phydev);
- phy_write(dev->phydev, MII_BMCR, BMCR_PDOWN);
-
- /* clear everything */
- bfin_mac_shutdown(dev);
-
- /* free the rx/tx buffers */
- desc_list_free();
-
- return 0;
-}
-
-static const struct net_device_ops bfin_mac_netdev_ops = {
- .ndo_open = bfin_mac_open,
- .ndo_stop = bfin_mac_close,
- .ndo_start_xmit = bfin_mac_hard_start_xmit,
- .ndo_set_mac_address = bfin_mac_set_mac_address,
- .ndo_tx_timeout = bfin_mac_timeout,
- .ndo_set_rx_mode = bfin_mac_set_multicast_list,
- .ndo_do_ioctl = bfin_mac_ioctl,
- .ndo_validate_addr = eth_validate_addr,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = bfin_mac_poll_controller,
-#endif
-};
-
-static int bfin_mac_probe(struct platform_device *pdev)
-{
- struct net_device *ndev;
- struct bfin_mac_local *lp;
- struct platform_device *pd;
- struct bfin_mii_bus_platform_data *mii_bus_data;
- int rc;
-
- ndev = alloc_etherdev(sizeof(struct bfin_mac_local));
- if (!ndev)
- return -ENOMEM;
-
- SET_NETDEV_DEV(ndev, &pdev->dev);
- platform_set_drvdata(pdev, ndev);
- lp = netdev_priv(ndev);
- lp->ndev = ndev;
-
- /* Grab the MAC address in the MAC */
- *(__le32 *) (&(ndev->dev_addr[0])) = cpu_to_le32(bfin_read_EMAC_ADDRLO());
- *(__le16 *) (&(ndev->dev_addr[4])) = cpu_to_le16((u16) bfin_read_EMAC_ADDRHI());
-
- /* probe mac */
- /*todo: how to probe? which is revision_register */
- bfin_write_EMAC_ADDRLO(0x12345678);
- if (bfin_read_EMAC_ADDRLO() != 0x12345678) {
- dev_err(&pdev->dev, "Cannot detect Blackfin on-chip ethernet MAC controller!\n");
- rc = -ENODEV;
- goto out_err_probe_mac;
- }
-
-
- /*
- * Is it valid? (Did bootloader initialize it?)
- * Grab the MAC from the board somehow
- * this is done in the arch/blackfin/mach-bfxxx/boards/eth_mac.c
- */
- if (!is_valid_ether_addr(ndev->dev_addr)) {
- if (bfin_get_ether_addr(ndev->dev_addr) ||
- !is_valid_ether_addr(ndev->dev_addr)) {
- /* Still not valid, get a random one */
- netdev_warn(ndev, "Setting Ethernet MAC to a random one\n");
- eth_hw_addr_random(ndev);
- }
- }
-
- setup_mac_addr(ndev->dev_addr);
-
- if (!dev_get_platdata(&pdev->dev)) {
- dev_err(&pdev->dev, "Cannot get platform device bfin_mii_bus!\n");
- rc = -ENODEV;
- goto out_err_probe_mac;
- }
- pd = dev_get_platdata(&pdev->dev);
- lp->mii_bus = platform_get_drvdata(pd);
- if (!lp->mii_bus) {
- dev_err(&pdev->dev, "Cannot get mii_bus!\n");
- rc = -ENODEV;
- goto out_err_probe_mac;
- }
- lp->mii_bus->priv = ndev;
- mii_bus_data = dev_get_platdata(&pd->dev);
-
- rc = mii_probe(ndev, mii_bus_data->phy_mode);
- if (rc) {
- dev_err(&pdev->dev, "MII Probe failed!\n");
- goto out_err_mii_probe;
- }
-
- lp->vlan1_mask = ETH_P_8021Q | mii_bus_data->vlan1_mask;
- lp->vlan2_mask = ETH_P_8021Q | mii_bus_data->vlan2_mask;
-
- ndev->netdev_ops = &bfin_mac_netdev_ops;
- ndev->ethtool_ops = &bfin_mac_ethtool_ops;
-
- timer_setup(&lp->tx_reclaim_timer, tx_reclaim_skb_timeout, 0);
-
- lp->flags = 0;
- netif_napi_add(ndev, &lp->napi, bfin_mac_poll, CONFIG_BFIN_RX_DESC_NUM);
-
- spin_lock_init(&lp->lock);
-
- /* now, enable interrupts */
- /* register irq handler */
- rc = request_irq(IRQ_MAC_RX, bfin_mac_interrupt,
- 0, "EMAC_RX", ndev);
- if (rc) {
- dev_err(&pdev->dev, "Cannot request Blackfin MAC RX IRQ!\n");
- rc = -EBUSY;
- goto out_err_request_irq;
- }
-
- rc = register_netdev(ndev);
- if (rc) {
- dev_err(&pdev->dev, "Cannot register net device!\n");
- goto out_err_reg_ndev;
- }
-
- bfin_mac_hwtstamp_init(ndev);
- rc = bfin_phc_init(ndev, &pdev->dev);
- if (rc) {
- dev_err(&pdev->dev, "Cannot register PHC device!\n");
- goto out_err_phc;
- }
-
- /* now, print out the card info, in a short format.. */
- netdev_info(ndev, "%s, Version %s\n", DRV_DESC, DRV_VERSION);
-
- return 0;
-
-out_err_phc:
-out_err_reg_ndev:
- free_irq(IRQ_MAC_RX, ndev);
-out_err_request_irq:
- netif_napi_del(&lp->napi);
-out_err_mii_probe:
- mdiobus_unregister(lp->mii_bus);
- mdiobus_free(lp->mii_bus);
-out_err_probe_mac:
- free_netdev(ndev);
-
- return rc;
-}
-
-static int bfin_mac_remove(struct platform_device *pdev)
-{
- struct net_device *ndev = platform_get_drvdata(pdev);
- struct bfin_mac_local *lp = netdev_priv(ndev);
-
- bfin_phc_release(lp);
-
- lp->mii_bus->priv = NULL;
-
- unregister_netdev(ndev);
-
- netif_napi_del(&lp->napi);
-
- free_irq(IRQ_MAC_RX, ndev);
-
- free_netdev(ndev);
-
- return 0;
-}
-
-#ifdef CONFIG_PM
-static int bfin_mac_suspend(struct platform_device *pdev, pm_message_t mesg)
-{
- struct net_device *net_dev = platform_get_drvdata(pdev);
- struct bfin_mac_local *lp = netdev_priv(net_dev);
-
- if (lp->wol) {
- bfin_write_EMAC_OPMODE((bfin_read_EMAC_OPMODE() & ~TE) | RE);
- bfin_write_EMAC_WKUP_CTL(MPKE);
- enable_irq_wake(IRQ_MAC_WAKEDET);
- } else {
- if (netif_running(net_dev))
- bfin_mac_close(net_dev);
- }
-
- return 0;
-}
-
-static int bfin_mac_resume(struct platform_device *pdev)
-{
- struct net_device *net_dev = platform_get_drvdata(pdev);
- struct bfin_mac_local *lp = netdev_priv(net_dev);
-
- if (lp->wol) {
- bfin_write_EMAC_OPMODE(bfin_read_EMAC_OPMODE() | TE);
- bfin_write_EMAC_WKUP_CTL(0);
- disable_irq_wake(IRQ_MAC_WAKEDET);
- } else {
- if (netif_running(net_dev))
- bfin_mac_open(net_dev);
- }
-
- return 0;
-}
-#else
-#define bfin_mac_suspend NULL
-#define bfin_mac_resume NULL
-#endif /* CONFIG_PM */
-
-static int bfin_mii_bus_probe(struct platform_device *pdev)
-{
- struct mii_bus *miibus;
- struct bfin_mii_bus_platform_data *mii_bus_pd;
- const unsigned short *pin_req;
- int rc, i;
-
- mii_bus_pd = dev_get_platdata(&pdev->dev);
- if (!mii_bus_pd) {
- dev_err(&pdev->dev, "No peripherals in platform data!\n");
- return -EINVAL;
- }
-
- /*
- * We are setting up a network card,
- * so set the GPIO pins to Ethernet mode
- */
- pin_req = mii_bus_pd->mac_peripherals;
- rc = peripheral_request_list(pin_req, KBUILD_MODNAME);
- if (rc) {
- dev_err(&pdev->dev, "Requesting peripherals failed!\n");
- return rc;
- }
-
- rc = -ENOMEM;
- miibus = mdiobus_alloc();
- if (miibus == NULL)
- goto out_err_alloc;
- miibus->read = bfin_mdiobus_read;
- miibus->write = bfin_mdiobus_write;
-
- miibus->parent = &pdev->dev;
- miibus->name = "bfin_mii_bus";
- miibus->phy_mask = mii_bus_pd->phy_mask;
-
- snprintf(miibus->id, MII_BUS_ID_SIZE, "%s-%x",
- pdev->name, pdev->id);
-
- rc = clamp(mii_bus_pd->phydev_number, 0, PHY_MAX_ADDR);
- if (rc != mii_bus_pd->phydev_number)
- dev_err(&pdev->dev, "Invalid number (%i) of phydevs\n",
- mii_bus_pd->phydev_number);
- for (i = 0; i < rc; ++i) {
- unsigned short phyaddr = mii_bus_pd->phydev_data[i].addr;
- if (phyaddr < PHY_MAX_ADDR)
- miibus->irq[phyaddr] = mii_bus_pd->phydev_data[i].irq;
- else
- dev_err(&pdev->dev,
- "Invalid PHY address %i for phydev %i\n",
- phyaddr, i);
- }
-
- rc = mdiobus_register(miibus);
- if (rc) {
- dev_err(&pdev->dev, "Cannot register MDIO bus!\n");
- goto out_err_irq_alloc;
- }
-
- platform_set_drvdata(pdev, miibus);
- return 0;
-
-out_err_irq_alloc:
- mdiobus_free(miibus);
-out_err_alloc:
- peripheral_free_list(pin_req);
-
- return rc;
-}
-
-static int bfin_mii_bus_remove(struct platform_device *pdev)
-{
- struct mii_bus *miibus = platform_get_drvdata(pdev);
- struct bfin_mii_bus_platform_data *mii_bus_pd =
- dev_get_platdata(&pdev->dev);
-
- mdiobus_unregister(miibus);
- mdiobus_free(miibus);
- peripheral_free_list(mii_bus_pd->mac_peripherals);
-
- return 0;
-}
-
-static struct platform_driver bfin_mii_bus_driver = {
- .probe = bfin_mii_bus_probe,
- .remove = bfin_mii_bus_remove,
- .driver = {
- .name = "bfin_mii_bus",
- },
-};
-
-static struct platform_driver bfin_mac_driver = {
- .probe = bfin_mac_probe,
- .remove = bfin_mac_remove,
- .resume = bfin_mac_resume,
- .suspend = bfin_mac_suspend,
- .driver = {
- .name = KBUILD_MODNAME,
- },
-};
-
-static struct platform_driver * const drivers[] = {
- &bfin_mii_bus_driver,
- &bfin_mac_driver,
-};
-
-static int __init bfin_mac_init(void)
-{
- return platform_register_drivers(drivers, ARRAY_SIZE(drivers));
-}
-
-module_init(bfin_mac_init);
-
-static void __exit bfin_mac_cleanup(void)
-{
- platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
-}
-
-module_exit(bfin_mac_cleanup);
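
platform_register_drivers() registers the MII-bus and MAC drivers as a unit, so a failure in either leaves nothing half-registered. A sketch of the semantics it provides (not the actual kernel implementation):

#include <linux/platform_device.h>

/* All-or-nothing registration: register each driver in order and, on
 * the first failure, unregister everything already registered. */
static int sketch_register_drivers(struct platform_driver * const *drv,
                                   size_t count)
{
        size_t i;
        int rc = 0;

        for (i = 0; i < count; i++) {
                rc = platform_driver_register(drv[i]);
                if (rc)
                        break;
        }
        while (rc && i-- > 0)
                platform_driver_unregister(drv[i]);
        return rc;
}
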
-
diff --git a/drivers/net/ethernet/adi/bfin_mac.h b/drivers/net/ethernet/adi/bfin_mac.h
deleted file mode 100644
index 4ad5b9be3f84..000000000000
--- a/drivers/net/ethernet/adi/bfin_mac.h
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
- * Blackfin On-Chip MAC Driver
- *
- * Copyright 2004-2007 Analog Devices Inc.
- *
- * Enter bugs at http://blackfin.uclinux.org/
- *
- * Licensed under the GPL-2 or later.
- */
-#ifndef _BFIN_MAC_H_
-#define _BFIN_MAC_H_
-
-#include <linux/net_tstamp.h>
-#include <linux/ptp_clock_kernel.h>
-#include <linux/timer.h>
-#include <linux/etherdevice.h>
-#include <linux/bfin_mac.h>
-
-/*
- * Disable hardware checksum for bug #5600 if writeback cache is
- * enabled. Otherwise, corrupted RX packets would be sent up the stack
- * without an error mark.
- */
-#ifndef CONFIG_BFIN_EXTMEM_WRITEBACK
-#define BFIN_MAC_CSUM_OFFLOAD
-#endif
-
-#define TX_RECLAIM_JIFFIES (HZ / 5)
-#define BFIN_MAC_RX_IRQ_DISABLED 1
-
-struct dma_descriptor {
- struct dma_descriptor *next_dma_desc;
- unsigned long start_addr;
- unsigned short config;
- unsigned short x_count;
-};
-
-struct status_area_rx {
-#if defined(BFIN_MAC_CSUM_OFFLOAD)
- unsigned short ip_hdr_csum; /* ip header checksum */
- /* IP payload (UDP, TCP, or other) checksum */
- unsigned short ip_payload_csum;
-#endif
- unsigned long status_word; /* the frame status word */
-};
-
-struct status_area_tx {
- unsigned long status_word; /* the frame status word */
-};
-
-/* use two descriptors for a packet */
-struct net_dma_desc_rx {
- struct net_dma_desc_rx *next;
- struct sk_buff *skb;
- struct dma_descriptor desc_a;
- struct dma_descriptor desc_b;
- struct status_area_rx status;
-};
-
-/* use two descriptors for a packet */
-struct net_dma_desc_tx {
- struct net_dma_desc_tx *next;
- struct sk_buff *skb;
- struct dma_descriptor desc_a;
- struct dma_descriptor desc_b;
- unsigned char packet[1560];
- struct status_area_tx status;
-};
-
-struct bfin_mac_local {
- spinlock_t lock;
-
- int wol; /* Wake On Lan */
- int irq_wake_requested;
- struct timer_list tx_reclaim_timer;
- struct net_device *ndev;
- struct napi_struct napi;
- unsigned long flags;
-
- /* Data for EMAC_VLAN1 regs */
- u16 vlan1_mask, vlan2_mask;
-
- /* MII and PHY state */
- int old_link; /* used by bf537_adjust_link */
- int old_speed;
- int old_duplex;
-
- struct mii_bus *mii_bus;
-
-#if defined(CONFIG_BFIN_MAC_USE_HWSTAMP)
- u32 addend;
- unsigned int shift;
- s32 max_ppb;
- struct hwtstamp_config stamp_cfg;
- struct ptp_clock_info caps;
- struct ptp_clock *clock;
- int phc_index;
- spinlock_t phc_lock; /* protects time lo/hi registers */
-#endif
-};
-
-int bfin_get_ether_addr(char *addr);
-
-#endif
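
As the comments in the header note, every packet uses two hardware descriptors. A standalone sketch of how such entries might be chained into a receive ring, reusing the layout above (the desc_a -> desc_b -> next hand-off is illustrative; real DMA configuration and bus addresses are omitted):

#include <stdlib.h>

struct dma_descriptor {
        struct dma_descriptor *next_dma_desc;
        unsigned long start_addr;
        unsigned short config;
        unsigned short x_count;
};

struct net_dma_desc_rx {
        struct net_dma_desc_rx *next;
        struct dma_descriptor desc_a;
        struct dma_descriptor desc_b;
};

static struct net_dma_desc_rx *build_rx_ring(int n)
{
        struct net_dma_desc_rx *ring = calloc(n, sizeof(*ring));
        int i;

        if (!ring)
                return NULL;
        for (i = 0; i < n; i++) {
                struct net_dma_desc_rx *nxt = &ring[(i + 1) % n];

                ring[i].next = nxt;
                /* desc_a hands off to desc_b, desc_b to the next entry */
                ring[i].desc_a.next_dma_desc = &ring[i].desc_b;
                ring[i].desc_b.next_dma_desc = &nxt->desc_a;
        }
        return ring;
}

int main(void)
{
        struct net_dma_desc_rx *ring = build_rx_ring(8);

        if (!ring)
                return 1;
        free(ring);
        return 0;
}
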
diff --git a/drivers/net/ethernet/davicom/Kconfig b/drivers/net/ethernet/davicom/Kconfig
index 7ec2d74f94d3..680a6d983f37 100644
--- a/drivers/net/ethernet/davicom/Kconfig
+++ b/drivers/net/ethernet/davicom/Kconfig
@@ -4,7 +4,7 @@
config DM9000
tristate "DM9000 support"
- depends on ARM || BLACKFIN || MIPS || COLDFIRE || NIOS2
+ depends on ARM || MIPS || COLDFIRE || NIOS2
select CRC32
select MII
---help---
diff --git a/drivers/net/ethernet/smsc/Kconfig b/drivers/net/ethernet/smsc/Kconfig
index 4c2f612e4414..358820282ef0 100644
--- a/drivers/net/ethernet/smsc/Kconfig
+++ b/drivers/net/ethernet/smsc/Kconfig
@@ -5,8 +5,8 @@
config NET_VENDOR_SMSC
bool "SMC (SMSC)/Western Digital devices"
default y
- depends on ARM || ARM64 || ATARI_ETHERNAT || BLACKFIN || COLDFIRE || \
- ISA || M32R || MAC || MIPS || MN10300 || NIOS2 || PCI || \
+ depends on ARM || ARM64 || ATARI_ETHERNAT || COLDFIRE || \
+ ISA || MAC || MIPS || NIOS2 || PCI || \
PCMCIA || SUPERH || XTENSA || H8300
---help---
If you have a network (Ethernet) card belonging to this class, say Y.
@@ -37,8 +37,8 @@ config SMC91X
select CRC32
select MII
depends on !OF || GPIOLIB
- depends on ARM || ARM64 || ATARI_ETHERNAT || BLACKFIN || COLDFIRE || \
- M32R || MIPS || MN10300 || NIOS2 || SUPERH || XTENSA || H8300
+ depends on ARM || ARM64 || ATARI_ETHERNAT || COLDFIRE || \
+ MIPS || NIOS2 || SUPERH || XTENSA || H8300
---help---
This is a driver for SMC's 91x series of Ethernet chipsets,
including the SMC91C94 and the SMC91C111. Say Y if you want it
@@ -77,7 +77,7 @@ config SMC911X
tristate "SMSC LAN911[5678] support"
select CRC32
select MII
- depends on (ARM || SUPERH || MN10300)
+ depends on (ARM || SUPERH)
---help---
This is a driver for SMSC's LAN911x series of Ethernet chipsets
including the new LAN9115, LAN9116, LAN9117, and LAN9118.
diff --git a/drivers/net/ethernet/smsc/smc91x.h b/drivers/net/ethernet/smsc/smc91x.h
index 08b17adf0a65..b337ee97e0c0 100644
--- a/drivers/net/ethernet/smsc/smc91x.h
+++ b/drivers/net/ethernet/smsc/smc91x.h
@@ -144,32 +144,6 @@ static inline void _SMC_outw_align4(u16 val, void __iomem *ioaddr, int reg,
#define SMC_IRQ_FLAGS (0)
-#elif defined(CONFIG_M32R)
-
-#define SMC_CAN_USE_8BIT 0
-#define SMC_CAN_USE_16BIT 1
-#define SMC_CAN_USE_32BIT 0
-
-#define SMC_inb(a, r) inb(((u32)a) + (r))
-#define SMC_inw(a, r) inw(((u32)a) + (r))
-#define SMC_outb(v, a, r) outb(v, ((u32)a) + (r))
-#define SMC_outw(lp, v, a, r) outw(v, ((u32)a) + (r))
-#define SMC_insw(a, r, p, l) insw(((u32)a) + (r), p, l)
-#define SMC_outsw(a, r, p, l) outsw(((u32)a) + (r), p, l)
-
-#define SMC_IRQ_FLAGS (0)
-
-#define RPC_LSA_DEFAULT RPC_LED_TX_RX
-#define RPC_LSB_DEFAULT RPC_LED_100_10
-
-#elif defined(CONFIG_MN10300)
-
-/*
- * MN10300/AM33 configuration
- */
-
-#include <unit/smc91111.h>
-
#elif defined(CONFIG_ATARI)
#define SMC_CAN_USE_8BIT 1
diff --git a/drivers/net/ethernet/tile/Kconfig b/drivers/net/ethernet/tile/Kconfig
deleted file mode 100644
index bdfeaf3d4fce..000000000000
--- a/drivers/net/ethernet/tile/Kconfig
+++ /dev/null
@@ -1,18 +0,0 @@
-#
-# Tilera network device configuration
-#
-
-config TILE_NET
- tristate "Tilera GBE/XGBE network driver support"
- depends on TILE
- default y
- select CRC32
- select TILE_GXIO_MPIPE if TILEGX
- select HIGH_RES_TIMERS if TILEGX
- imply PTP_1588_CLOCK if TILEGX
- ---help---
- This is a standard Linux network device driver for the
- on-chip Tilera Gigabit Ethernet and XAUI interfaces.
-
- To compile this driver as a module, choose M here: the module
- will be called tile_net.
diff --git a/drivers/net/ethernet/tile/Makefile b/drivers/net/ethernet/tile/Makefile
deleted file mode 100644
index 3d0ae1f07fc9..000000000000
--- a/drivers/net/ethernet/tile/Makefile
+++ /dev/null
@@ -1,11 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# Makefile for the TILE on-chip networking support.
-#
-
-obj-$(CONFIG_TILE_NET) += tile_net.o
-ifdef CONFIG_TILEGX
-tile_net-y := tilegx.o
-else
-tile_net-y := tilepro.o
-endif
diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c
deleted file mode 100644
index b3e5816a4678..000000000000
--- a/drivers/net/ethernet/tile/tilegx.c
+++ /dev/null
@@ -1,2279 +0,0 @@
-/*
- * Copyright 2012 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/moduleparam.h>
-#include <linux/sched.h>
-#include <linux/kernel.h> /* printk() */
-#include <linux/slab.h> /* kmalloc() */
-#include <linux/errno.h> /* error codes */
-#include <linux/types.h> /* size_t */
-#include <linux/interrupt.h>
-#include <linux/in.h>
-#include <linux/irq.h>
-#include <linux/netdevice.h> /* struct device, and other headers */
-#include <linux/etherdevice.h> /* eth_type_trans */
-#include <linux/skbuff.h>
-#include <linux/ioctl.h>
-#include <linux/cdev.h>
-#include <linux/hugetlb.h>
-#include <linux/in6.h>
-#include <linux/timer.h>
-#include <linux/hrtimer.h>
-#include <linux/ktime.h>
-#include <linux/io.h>
-#include <linux/ctype.h>
-#include <linux/ip.h>
-#include <linux/ipv6.h>
-#include <linux/tcp.h>
-#include <linux/net_tstamp.h>
-#include <linux/ptp_clock_kernel.h>
-#include <linux/sched/isolation.h>
-
-#include <asm/checksum.h>
-#include <asm/homecache.h>
-#include <gxio/mpipe.h>
-#include <arch/sim.h>
-
-/* Default transmit lockup timeout period, in jiffies. */
-#define TILE_NET_TIMEOUT (5 * HZ)
-
-/* The maximum number of distinct channels (idesc.channel is 5 bits). */
-#define TILE_NET_CHANNELS 32
-
-/* Maximum number of idescs to handle per "poll". */
-#define TILE_NET_BATCH 128
-
-/* Maximum number of packets to handle per "poll". */
-#define TILE_NET_WEIGHT 64
-
-/* Maximum Jumbo Packet MTU */
-#define TILE_JUMBO_MAX_MTU 9000
-
-/* Number of entries in each iqueue. */
-#define IQUEUE_ENTRIES 512
-
-/* Number of entries in each equeue. */
-#define EQUEUE_ENTRIES 2048
-
-/* Total header bytes per equeue slot. Must be big enough for 2 bytes
- * of NET_IP_ALIGN alignment, plus 14 bytes (?) of L2 header, plus up to
- * 60 bytes of actual TCP header. We round up to align to cache lines.
- */
-#define HEADER_BYTES 128
-
-/* Maximum completions per cpu per device (must be a power of two).
- * ISSUE: What is the right number here? If this is too small, then
- * egress might block waiting for free space in a completions array.
- * ISSUE: At the least, allocate these only for initialized echannels.
- */
-#define TILE_NET_MAX_COMPS 64
-
-#define MAX_FRAGS (MAX_SKB_FRAGS + 1)
-
-/* The "kinds" of buffer stacks (small/large/jumbo). */
-#define MAX_KINDS 3
-
-/* Size of completions data to allocate.
- * ISSUE: Probably more than needed since we don't use all the channels.
- */
-#define COMPS_SIZE (TILE_NET_CHANNELS * sizeof(struct tile_net_comps))
-
-/* Size of NotifRing data to allocate. */
-#define NOTIF_RING_SIZE (IQUEUE_ENTRIES * sizeof(gxio_mpipe_idesc_t))
-
-/* Timeout to wake the per-device TX timer after we stop the queue.
- * We don't want the timeout too short (adds overhead, and might end
- * up causing stop/wake/stop/wake cycles) or too long (affects performance).
- * For the 10 Gb NIC, 30 usec means roughly 30+ 1500-byte packets.
- */
-#define TX_TIMER_DELAY_USEC 30
-
-/* Timeout to wake the per-cpu egress timer to free completions. */
-#define EGRESS_TIMER_DELAY_USEC 1000
-
-MODULE_AUTHOR("Tilera Corporation");
-MODULE_LICENSE("GPL");
-
-/* A "packet fragment" (a chunk of memory). */
-struct frag {
- void *buf;
- size_t length;
-};
-
-/* A single completion. */
-struct tile_net_comp {
- /* The "complete_count" when the completion will be complete. */
- s64 when;
- /* The buffer to be freed when the completion is complete. */
- struct sk_buff *skb;
-};
-
-/* The completions for a given cpu and echannel. */
-struct tile_net_comps {
- /* The completions. */
- struct tile_net_comp comp_queue[TILE_NET_MAX_COMPS];
- /* The number of completions used. */
- unsigned long comp_next;
- /* The number of completions freed. */
- unsigned long comp_last;
-};
-
-/* The transmit wake timer for a given cpu and echannel. */
-struct tile_net_tx_wake {
- int tx_queue_idx;
- struct hrtimer timer;
- struct net_device *dev;
-};
-
-/* Info for a specific cpu. */
-struct tile_net_info {
- /* Our cpu. */
- int my_cpu;
- /* A timer for handling egress completions. */
- struct hrtimer egress_timer;
- /* True if "egress_timer" is scheduled. */
- bool egress_timer_scheduled;
- struct info_mpipe {
- /* Packet queue. */
- gxio_mpipe_iqueue_t iqueue;
- /* The NAPI struct. */
- struct napi_struct napi;
- /* Number of buffers (by kind) which must still be provided. */
- unsigned int num_needed_buffers[MAX_KINDS];
- /* instance id. */
- int instance;
- /* True if iqueue is valid. */
- bool has_iqueue;
- /* NAPI flags. */
- bool napi_added;
- bool napi_enabled;
- /* Comps for each egress channel. */
- struct tile_net_comps *comps_for_echannel[TILE_NET_CHANNELS];
- /* Transmit wake timer for each egress channel. */
- struct tile_net_tx_wake tx_wake[TILE_NET_CHANNELS];
- } mpipe[NR_MPIPE_MAX];
-};
-
-/* Info for egress on a particular egress channel. */
-struct tile_net_egress {
- /* The "equeue". */
- gxio_mpipe_equeue_t *equeue;
- /* The headers for TSO. */
- unsigned char *headers;
-};
-
-/* Info for a specific device. */
-struct tile_net_priv {
- /* Our network device. */
- struct net_device *dev;
- /* The primary link. */
- gxio_mpipe_link_t link;
- /* The primary channel, if open, else -1. */
- int channel;
- /* The "loopify" egress link, if needed. */
- gxio_mpipe_link_t loopify_link;
- /* The "loopify" egress channel, if open, else -1. */
- int loopify_channel;
- /* The egress channel (channel or loopify_channel). */
- int echannel;
- /* mPIPE instance, 0 or 1. */
- int instance;
- /* The timestamp config. */
- struct hwtstamp_config stamp_cfg;
-};
-
-static struct mpipe_data {
- /* The ingress irq. */
- int ingress_irq;
-
- /* The "context" for all devices. */
- gxio_mpipe_context_t context;
-
- /* Egress info, indexed by "priv->echannel"
- * (lazily created as needed).
- */
- struct tile_net_egress
- egress_for_echannel[TILE_NET_CHANNELS];
-
- /* Devices currently associated with each channel.
- * NOTE: The array entry can become NULL after ifconfig down, but
- * we do not free the underlying net_device structures, so it is
- * safe to use a pointer after reading it from this array.
- */
- struct net_device
- *tile_net_devs_for_channel[TILE_NET_CHANNELS];
-
- /* The actual memory allocated for the buffer stacks. */
- void *buffer_stack_vas[MAX_KINDS];
-
- /* The amount of memory allocated for each buffer stack. */
- size_t buffer_stack_bytes[MAX_KINDS];
-
- /* The first buffer stack index
- * (small = +0, large = +1, jumbo = +2).
- */
- int first_buffer_stack;
-
- /* The buckets. */
- int first_bucket;
- int num_buckets;
-
- /* PTP-specific data. */
- struct ptp_clock *ptp_clock;
- struct ptp_clock_info caps;
-
- /* Lock for ptp accessors. */
- struct mutex ptp_lock;
-
-} mpipe_data[NR_MPIPE_MAX] = {
- [0 ... (NR_MPIPE_MAX - 1)] = {
- .ingress_irq = -1,
- .first_buffer_stack = -1,
- .first_bucket = -1,
- .num_buckets = 1
- }
-};
-
-/* A mutex for "tile_net_devs_for_channel". */
-static DEFINE_MUTEX(tile_net_devs_for_channel_mutex);
-
-/* The per-cpu info. */
-static DEFINE_PER_CPU(struct tile_net_info, per_cpu_info);
-
-
-/* The buffer size enums for each buffer stack.
- * See arch/tile/include/gxio/mpipe.h for the set of possible values.
- * We avoid the "10384" size because it can induce "false chaining"
- * on "cut-through" jumbo packets.
- */
-static gxio_mpipe_buffer_size_enum_t buffer_size_enums[MAX_KINDS] = {
- GXIO_MPIPE_BUFFER_SIZE_128,
- GXIO_MPIPE_BUFFER_SIZE_1664,
- GXIO_MPIPE_BUFFER_SIZE_16384
-};
-
-/* Text value of tile_net.cpus if passed as a module parameter. */
-static char *network_cpus_string;
-
-/* The actual cpus in "network_cpus". */
-static struct cpumask network_cpus_map;
-
-/* If "tile_net.loopify=LINK" was specified, this is "LINK". */
-static char *loopify_link_name;
-
-/* If "tile_net.custom" was specified, this is true. */
-static bool custom_flag;
-
-/* If "tile_net.jumbo=NUM" was specified, this is "NUM". */
-static uint jumbo_num;
-
-/* Obtain mpipe instance from struct tile_net_priv given struct net_device. */
-static inline int mpipe_instance(struct net_device *dev)
-{
- struct tile_net_priv *priv = netdev_priv(dev);
- return priv->instance;
-}
-
-/* The "tile_net.cpus" argument specifies the cpus that are dedicated
- * to handle ingress packets.
- *
- * The parameter should be in the form "tile_net.cpus=m-n[,x-y]", where
- * m, n, x, y are integer numbers that represent the cpus that can be
- * neither a dedicated cpu nor a dataplane cpu.
- */
-static bool network_cpus_init(void)
-{
- int rc;
-
- if (network_cpus_string == NULL)
- return false;
-
- rc = cpulist_parse_crop(network_cpus_string, &network_cpus_map);
- if (rc != 0) {
- pr_warn("tile_net.cpus=%s: malformed cpu list\n",
- network_cpus_string);
- return false;
- }
-
- /* Restrict the mask to cpus that actually exist. */
- cpumask_and(&network_cpus_map, &network_cpus_map, cpu_possible_mask);
-
- if (cpumask_empty(&network_cpus_map)) {
- pr_warn("Ignoring empty tile_net.cpus='%s'.\n",
- network_cpus_string);
- return false;
- }
-
- pr_info("Linux network CPUs: %*pbl\n",
- cpumask_pr_args(&network_cpus_map));
- return true;
-}
-
-module_param_named(cpus, network_cpus_string, charp, 0444);
-MODULE_PARM_DESC(cpus, "cpulist of cores that handle network interrupts");
-
-/* The "tile_net.loopify=LINK" argument causes the named device to
- * actually use "loop0" for ingress, and "loop1" for egress. This
- * allows an app to sit between the actual link and linux, passing
- * (some) packets along to linux, and forwarding (some) packets sent
- * out by linux.
- */
-module_param_named(loopify, loopify_link_name, charp, 0444);
-MODULE_PARM_DESC(loopify, "name the device to use loop0/1 for ingress/egress");
-
-/* The "tile_net.custom" argument causes us to ignore the "conventional"
- * classifier metadata, in particular, the "l2_offset".
- */
-module_param_named(custom, custom_flag, bool, 0444);
-MODULE_PARM_DESC(custom, "indicates a (heavily) customized classifier");
-
-/* The "tile_net.jumbo" argument causes us to support "jumbo" packets,
- * and to allocate the given number of "jumbo" buffers.
- */
-module_param_named(jumbo, jumbo_num, uint, 0444);
-MODULE_PARM_DESC(jumbo, "the number of buffers to support jumbo packets");
-
-/* Atomically update a statistics field.
- * Note that on TILE-Gx, this operation is fire-and-forget on the
- * issuing core (single-cycle dispatch) and takes only a few cycles
- * longer than a regular store when the request reaches the home cache.
- * No expensive bus management overhead is required.
- */
-static void tile_net_stats_add(unsigned long value, unsigned long *field)
-{
- BUILD_BUG_ON(sizeof(atomic_long_t) != sizeof(unsigned long));
- atomic_long_add(value, (atomic_long_t *)field);
-}
-
-/* Allocate and push a buffer. */
-static bool tile_net_provide_buffer(int instance, int kind)
-{
- struct mpipe_data *md = &mpipe_data[instance];
- gxio_mpipe_buffer_size_enum_t bse = buffer_size_enums[kind];
- size_t bs = gxio_mpipe_buffer_size_enum_to_buffer_size(bse);
- const unsigned long buffer_alignment = 128;
- struct sk_buff *skb;
- int len;
-
- len = sizeof(struct sk_buff **) + buffer_alignment + bs;
- skb = dev_alloc_skb(len);
- if (skb == NULL)
- return false;
-
- /* Make room for a back-pointer to 'skb' and guarantee alignment. */
- skb_reserve(skb, sizeof(struct sk_buff **));
- skb_reserve(skb, -(long)skb->data & (buffer_alignment - 1));
-
- /* Save a back-pointer to 'skb'. */
- *(struct sk_buff **)(skb->data - sizeof(struct sk_buff **)) = skb;
-
- /* Make sure "skb" and the back-pointer have been flushed. */
- wmb();
-
- gxio_mpipe_push_buffer(&md->context, md->first_buffer_stack + kind,
- (void *)va_to_tile_io_addr(skb->data));
-
- return true;
-}
-
-/* Convert a raw mpipe buffer to its matching skb pointer. */
-static struct sk_buff *mpipe_buf_to_skb(void *va)
-{
- /* Acquire the associated "skb". */
- struct sk_buff **skb_ptr = va - sizeof(*skb_ptr);
- struct sk_buff *skb = *skb_ptr;
-
- /* Paranoia. */
- if (skb->data != va) {
- /* Panic here since there's a reasonable chance
- * that a corrupt buffer indicates generic memory
- * corruption, with unpredictable system effects.
- */
- panic("Corrupt linux buffer! va=%p, skb=%p, skb->data=%p",
- va, skb, skb->data);
- }
-
- return skb;
-}
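
This recovery works because tile_net_provide_buffer() stored a pointer to the owning skb immediately below the aligned buffer address it pushed to the hardware. A standalone demo of the back-pointer trick:

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

#define BUF_ALIGN 128UL

struct fake_skb {
        unsigned char *data;
        unsigned char storage[4096];
};

/* Reserve room for a back-pointer below the aligned buffer address that
 * gets handed to the hardware. */
static void *publish_buffer(struct fake_skb *skb)
{
        uintptr_t p = (uintptr_t)skb->storage + sizeof(struct fake_skb *);

        p = (p + BUF_ALIGN - 1) & ~(BUF_ALIGN - 1);     /* align up */
        skb->data = (unsigned char *)p;
        *((struct fake_skb **)skb->data - 1) = skb;     /* store back-ptr */
        return skb->data;
}

/* Recover the owning object from the raw buffer address alone. */
static struct fake_skb *buffer_to_skb(void *va)
{
        return *((struct fake_skb **)va - 1);
}

int main(void)
{
        struct fake_skb *skb = malloc(sizeof(*skb));
        void *va;

        if (!skb)
                return 1;
        va = publish_buffer(skb);
        assert(buffer_to_skb(va) == skb);
        free(skb);
        return 0;
}
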
-
-static void tile_net_pop_all_buffers(int instance, int stack)
-{
- struct mpipe_data *md = &mpipe_data[instance];
-
- for (;;) {
- tile_io_addr_t addr =
- (tile_io_addr_t)gxio_mpipe_pop_buffer(&md->context,
- stack);
- if (addr == 0)
- break;
- dev_kfree_skb_irq(mpipe_buf_to_skb(tile_io_addr_to_va(addr)));
- }
-}
-
-/* Provide linux buffers to mPIPE. */
-static void tile_net_provide_needed_buffers(void)
-{
- struct tile_net_info *info = this_cpu_ptr(&per_cpu_info);
- int instance, kind;
- for (instance = 0; instance < NR_MPIPE_MAX &&
- info->mpipe[instance].has_iqueue; instance++) {
- for (kind = 0; kind < MAX_KINDS; kind++) {
- while (info->mpipe[instance].num_needed_buffers[kind]
- != 0) {
- if (!tile_net_provide_buffer(instance, kind)) {
- pr_notice("Tile %d still needs some buffers\n",
- info->my_cpu);
- return;
- }
- info->mpipe[instance].
- num_needed_buffers[kind]--;
- }
- }
- }
-}
-
-/* Get RX timestamp, and store it in the skb. */
-static void tile_rx_timestamp(struct tile_net_priv *priv, struct sk_buff *skb,
- gxio_mpipe_idesc_t *idesc)
-{
- if (unlikely(priv->stamp_cfg.rx_filter != HWTSTAMP_FILTER_NONE)) {
- struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
- memset(shhwtstamps, 0, sizeof(*shhwtstamps));
- shhwtstamps->hwtstamp = ktime_set(idesc->time_stamp_sec,
- idesc->time_stamp_ns);
- }
-}
-
-/* Get TX timestamp, and store it in the skb. */
-static void tile_tx_timestamp(struct sk_buff *skb, int instance)
-{
- struct skb_shared_info *shtx = skb_shinfo(skb);
- if (unlikely((shtx->tx_flags & SKBTX_HW_TSTAMP) != 0)) {
- struct mpipe_data *md = &mpipe_data[instance];
- struct skb_shared_hwtstamps shhwtstamps;
- struct timespec64 ts;
-
- shtx->tx_flags |= SKBTX_IN_PROGRESS;
- gxio_mpipe_get_timestamp(&md->context, &ts);
- memset(&shhwtstamps, 0, sizeof(shhwtstamps));
- shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
- skb_tstamp_tx(skb, &shhwtstamps);
- }
-}
-
-/* Use ioctl() to enable or disable TX or RX timestamping. */
-static int tile_hwtstamp_set(struct net_device *dev, struct ifreq *rq)
-{
- struct hwtstamp_config config;
- struct tile_net_priv *priv = netdev_priv(dev);
-
- if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
- return -EFAULT;
-
- if (config.flags) /* reserved for future extensions */
- return -EINVAL;
-
- switch (config.tx_type) {
- case HWTSTAMP_TX_OFF:
- case HWTSTAMP_TX_ON:
- break;
- default:
- return -ERANGE;
- }
-
- switch (config.rx_filter) {
- case HWTSTAMP_FILTER_NONE:
- break;
- case HWTSTAMP_FILTER_ALL:
- case HWTSTAMP_FILTER_SOME:
- case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
- case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
- case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
- case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
- case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
- case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
- case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
- case HWTSTAMP_FILTER_PTP_V2_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_SYNC:
- case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
- case HWTSTAMP_FILTER_NTP_ALL:
- config.rx_filter = HWTSTAMP_FILTER_ALL;
- break;
- default:
- return -ERANGE;
- }
-
- if (copy_to_user(rq->ifr_data, &config, sizeof(config)))
- return -EFAULT;
-
- priv->stamp_cfg = config;
- return 0;
-}
-
-static int tile_hwtstamp_get(struct net_device *dev, struct ifreq *rq)
-{
- struct tile_net_priv *priv = netdev_priv(dev);
-
- if (copy_to_user(rq->ifr_data, &priv->stamp_cfg,
- sizeof(priv->stamp_cfg)))
- return -EFAULT;
-
- return 0;
-}
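
These two handlers implement the standard hardware-timestamping ioctls. A minimal userspace caller for reference (the interface name is an example); note that the driver above grants HWTSTAMP_FILTER_ALL for most requested RX filters, which is visible in the config written back:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(void)
{
        struct hwtstamp_config cfg;
        struct ifreq ifr;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0)
                return 1;

        memset(&cfg, 0, sizeof(cfg));
        cfg.tx_type = HWTSTAMP_TX_ON;
        cfg.rx_filter = HWTSTAMP_FILTER_ALL;

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
        ifr.ifr_data = (void *)&cfg;

        if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
                perror("SIOCSHWTSTAMP");
        else
                printf("rx_filter granted: %d\n", cfg.rx_filter);

        close(fd);
        return 0;
}
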
-
-static inline bool filter_packet(struct net_device *dev, void *buf)
-{
- /* Filter packets received before we're up. */
- if (dev == NULL || !(dev->flags & IFF_UP))
- return true;
-
- /* Filter out packets that aren't for us. */
- if (!(dev->flags & IFF_PROMISC) &&
- !is_multicast_ether_addr(buf) &&
- !ether_addr_equal(dev->dev_addr, buf))
- return true;
-
- return false;
-}
-
-static void tile_net_receive_skb(struct net_device *dev, struct sk_buff *skb,
- gxio_mpipe_idesc_t *idesc, unsigned long len)
-{
- struct tile_net_info *info = this_cpu_ptr(&per_cpu_info);
- struct tile_net_priv *priv = netdev_priv(dev);
- int instance = priv->instance;
-
- /* Encode the actual packet length. */
- skb_put(skb, len);
-
- skb->protocol = eth_type_trans(skb, dev);
-
- /* Acknowledge "good" hardware checksums. */
- if (idesc->cs && idesc->csum_seed_val == 0xFFFF)
- skb->ip_summed = CHECKSUM_UNNECESSARY;
-
- /* Get RX timestamp from idesc. */
- tile_rx_timestamp(priv, skb, idesc);
-
- napi_gro_receive(&info->mpipe[instance].napi, skb);
-
- /* Update stats. */
- tile_net_stats_add(1, &dev->stats.rx_packets);
- tile_net_stats_add(len, &dev->stats.rx_bytes);
-
- /* Need a new buffer. */
- if (idesc->size == buffer_size_enums[0])
- info->mpipe[instance].num_needed_buffers[0]++;
- else if (idesc->size == buffer_size_enums[1])
- info->mpipe[instance].num_needed_buffers[1]++;
- else
- info->mpipe[instance].num_needed_buffers[2]++;
-}
-
-/* Handle a packet. Return true if "processed", false if "filtered". */
-static bool tile_net_handle_packet(int instance, gxio_mpipe_idesc_t *idesc)
-{
- struct tile_net_info *info = this_cpu_ptr(&per_cpu_info);
- struct mpipe_data *md = &mpipe_data[instance];
- struct net_device *dev = md->tile_net_devs_for_channel[idesc->channel];
- uint8_t l2_offset;
- void *va;
- void *buf;
- unsigned long len;
- bool filter;
-
- /* Drop packets for which no buffer was available (which can
- * happen under heavy load), or for which the me/tr/ce flags
- * are set (which can happen for jumbo cut-through packets,
- * or with a customized classifier).
- */
- if (idesc->be || idesc->me || idesc->tr || idesc->ce) {
- if (dev)
- tile_net_stats_add(1, &dev->stats.rx_errors);
- goto drop;
- }
-
- /* Get the "l2_offset", if allowed. */
- l2_offset = custom_flag ? 0 : gxio_mpipe_idesc_get_l2_offset(idesc);
-
- /* Get the VA (including NET_IP_ALIGN bytes of "headroom"). */
- va = tile_io_addr_to_va((unsigned long)idesc->va);
-
- /* Get the actual packet start/length. */
- buf = va + l2_offset;
- len = idesc->l2_size - l2_offset;
-
- /* Point "va" at the raw buffer. */
- va -= NET_IP_ALIGN;
-
- filter = filter_packet(dev, buf);
- if (filter) {
- if (dev)
- tile_net_stats_add(1, &dev->stats.rx_dropped);
-drop:
- gxio_mpipe_iqueue_drop(&info->mpipe[instance].iqueue, idesc);
- } else {
- struct sk_buff *skb = mpipe_buf_to_skb(va);
-
- /* Skip headroom, and any custom header. */
- skb_reserve(skb, NET_IP_ALIGN + l2_offset);
-
- tile_net_receive_skb(dev, skb, idesc, len);
- }
-
- gxio_mpipe_iqueue_consume(&info->mpipe[instance].iqueue, idesc);
- return !filter;
-}
-
-/* Handle some packets for the current CPU.
- *
- * This function handles up to TILE_NET_BATCH idescs per call.
- *
- * ISSUE: Since we do not provide new buffers until this function is
- * complete, we must initially provide enough buffers for each network
- * cpu to fill its iqueue and also its batched idescs.
- *
- * ISSUE: The "rotting packet" race condition occurs if a packet
- * arrives after the queue appears to be empty, and before the
- * hypervisor interrupt is re-enabled.
- */
-static int tile_net_poll(struct napi_struct *napi, int budget)
-{
- struct tile_net_info *info = this_cpu_ptr(&per_cpu_info);
- unsigned int work = 0;
- gxio_mpipe_idesc_t *idesc;
- int instance, i, n;
- struct mpipe_data *md;
- struct info_mpipe *info_mpipe =
- container_of(napi, struct info_mpipe, napi);
-
- if (budget <= 0)
- goto done;
-
- instance = info_mpipe->instance;
- while ((n = gxio_mpipe_iqueue_try_peek(
- &info_mpipe->iqueue,
- &idesc)) > 0) {
- for (i = 0; i < n; i++) {
- if (i == TILE_NET_BATCH)
- goto done;
- if (tile_net_handle_packet(instance,
- idesc + i)) {
- if (++work >= budget)
- goto done;
- }
- }
- }
-
- /* There are no packets left. */
- napi_complete_done(&info_mpipe->napi, work);
-
- md = &mpipe_data[instance];
- /* Re-enable hypervisor interrupts. */
- gxio_mpipe_enable_notif_ring_interrupt(
- &md->context, info->mpipe[instance].iqueue.ring);
-
- /* HACK: Avoid the "rotting packet" problem. */
- if (gxio_mpipe_iqueue_try_peek(&info_mpipe->iqueue, &idesc) > 0)
- napi_schedule(&info_mpipe->napi);
-
- /* ISSUE: Handle completions? */
-
-done:
- tile_net_provide_needed_buffers();
-
- return work;
-}
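
The structure above is the usual NAPI contract plus one extra re-peek to close the "rotting packet" window the comment describes. Its general shape as a sketch (the device_* helpers are hypothetical):

#include <linux/netdevice.h>

/* Hypothetical device helpers standing in for the gxio iqueue calls. */
static bool device_has_packet(void);
static bool handle_one_packet(void);    /* returns false if filtered */
static void device_enable_rx_irq(void);

static int sketch_poll(struct napi_struct *napi, int budget)
{
        int work = 0;

        while (work < budget && device_has_packet()) {
                if (handle_one_packet())
                        work++;
        }

        if (work < budget) {
                /* Queue drained within budget: leave polling mode... */
                napi_complete_done(napi, work);
                /* ...re-enable the device interrupt... */
                device_enable_rx_irq();
                /* ...and close the "rotting packet" window. */
                if (device_has_packet())
                        napi_schedule(napi);
        }
        return work;
}
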
-
-/* Handle an ingress interrupt from an instance on the current cpu. */
-static irqreturn_t tile_net_handle_ingress_irq(int irq, void *id)
-{
- struct tile_net_info *info = this_cpu_ptr(&per_cpu_info);
- napi_schedule(&info->mpipe[(uint64_t)id].napi);
- return IRQ_HANDLED;
-}
-
-/* Free some completions. This must be called with interrupts blocked. */
-static int tile_net_free_comps(gxio_mpipe_equeue_t *equeue,
- struct tile_net_comps *comps,
- int limit, bool force_update)
-{
- int n = 0;
- while (comps->comp_last < comps->comp_next) {
- unsigned int cid = comps->comp_last % TILE_NET_MAX_COMPS;
- struct tile_net_comp *comp = &comps->comp_queue[cid];
- if (!gxio_mpipe_equeue_is_complete(equeue, comp->when,
- force_update || n == 0))
- break;
- dev_kfree_skb_irq(comp->skb);
- comps->comp_last++;
- if (++n == limit)
- break;
- }
- return n;
-}
-
-/* Add a completion. This must be called with interrupts blocked.
- * tile_net_equeue_try_reserve() will have ensured a free completion entry.
- */
-static void add_comp(gxio_mpipe_equeue_t *equeue,
- struct tile_net_comps *comps,
- uint64_t when, struct sk_buff *skb)
-{
- int cid = comps->comp_next % TILE_NET_MAX_COMPS;
- comps->comp_queue[cid].when = when;
- comps->comp_queue[cid].skb = skb;
- comps->comp_next++;
-}
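
comp_next and comp_last are free-running counters: they only ever increase, their difference is the queue occupancy, and "% TILE_NET_MAX_COMPS" selects the slot. A standalone demo of the same ring discipline:

#include <assert.h>

#define SIZE 64

struct ring {
        int slot[SIZE];
        unsigned long next;     /* entries ever added */
        unsigned long last;     /* entries ever removed */
};

static int ring_add(struct ring *r, int v)
{
        if (r->next - r->last >= SIZE)
                return -1;                      /* full */
        r->slot[r->next % SIZE] = v;
        r->next++;
        return 0;
}

static int ring_remove(struct ring *r, int *v)
{
        if (r->last == r->next)
                return -1;                      /* empty */
        *v = r->slot[r->last % SIZE];
        r->last++;
        return 0;
}

int main(void)
{
        struct ring r = { .next = 0, .last = 0 };
        int v;

        assert(ring_add(&r, 42) == 0);
        assert(ring_remove(&r, &v) == 0 && v == 42);
        return 0;
}
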
-
-static void tile_net_schedule_tx_wake_timer(struct net_device *dev,
- int tx_queue_idx)
-{
- struct tile_net_info *info = &per_cpu(per_cpu_info, tx_queue_idx);
- struct tile_net_priv *priv = netdev_priv(dev);
- int instance = priv->instance;
- struct tile_net_tx_wake *tx_wake =
- &info->mpipe[instance].tx_wake[priv->echannel];
-
- hrtimer_start(&tx_wake->timer,
- TX_TIMER_DELAY_USEC * 1000UL,
- HRTIMER_MODE_REL_PINNED);
-}
-
-static enum hrtimer_restart tile_net_handle_tx_wake_timer(struct hrtimer *t)
-{
- struct tile_net_tx_wake *tx_wake =
- container_of(t, struct tile_net_tx_wake, timer);
- netif_wake_subqueue(tx_wake->dev, tx_wake->tx_queue_idx);
- return HRTIMER_NORESTART;
-}
-
-/* Make sure the egress timer is scheduled. */
-static void tile_net_schedule_egress_timer(void)
-{
- struct tile_net_info *info = this_cpu_ptr(&per_cpu_info);
-
- if (!info->egress_timer_scheduled) {
- hrtimer_start(&info->egress_timer,
- EGRESS_TIMER_DELAY_USEC * 1000UL,
- HRTIMER_MODE_REL_PINNED);
- info->egress_timer_scheduled = true;
- }
-}
-
-/* The "function" for "info->egress_timer".
- *
- * This timer will reschedule itself as long as there are any pending
- * completions expected for this tile.
- */
-static enum hrtimer_restart tile_net_handle_egress_timer(struct hrtimer *t)
-{
- struct tile_net_info *info = this_cpu_ptr(&per_cpu_info);
- unsigned long irqflags;
- bool pending = false;
- int i, instance;
-
- local_irq_save(irqflags);
-
- /* The timer is no longer scheduled. */
- info->egress_timer_scheduled = false;
-
- /* Free all possible comps for this tile. */
- for (instance = 0; instance < NR_MPIPE_MAX &&
- info->mpipe[instance].has_iqueue; instance++) {
- for (i = 0; i < TILE_NET_CHANNELS; i++) {
- struct tile_net_egress *egress =
- &mpipe_data[instance].egress_for_echannel[i];
- struct tile_net_comps *comps =
- info->mpipe[instance].comps_for_echannel[i];
- if (!egress || comps->comp_last >= comps->comp_next)
- continue;
- tile_net_free_comps(egress->equeue, comps, -1, true);
- pending = pending ||
- (comps->comp_last < comps->comp_next);
- }
- }
-
- /* Reschedule timer if needed. */
- if (pending)
- tile_net_schedule_egress_timer();
-
- local_irq_restore(irqflags);
-
- return HRTIMER_NORESTART;
-}
-
-/* PTP clock operations. */
-
-static int ptp_mpipe_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
-{
- int ret = 0;
- struct mpipe_data *md = container_of(ptp, struct mpipe_data, caps);
- mutex_lock(&md->ptp_lock);
- if (gxio_mpipe_adjust_timestamp_freq(&md->context, ppb))
- ret = -EINVAL;
- mutex_unlock(&md->ptp_lock);
- return ret;
-}
-
-static int ptp_mpipe_adjtime(struct ptp_clock_info *ptp, s64 delta)
-{
- int ret = 0;
- struct mpipe_data *md = container_of(ptp, struct mpipe_data, caps);
- mutex_lock(&md->ptp_lock);
- if (gxio_mpipe_adjust_timestamp(&md->context, delta))
- ret = -EBUSY;
- mutex_unlock(&md->ptp_lock);
- return ret;
-}
-
-static int ptp_mpipe_gettime(struct ptp_clock_info *ptp,
- struct timespec64 *ts)
-{
- int ret = 0;
- struct mpipe_data *md = container_of(ptp, struct mpipe_data, caps);
- mutex_lock(&md->ptp_lock);
- if (gxio_mpipe_get_timestamp(&md->context, ts))
- ret = -EBUSY;
- mutex_unlock(&md->ptp_lock);
- return ret;
-}
-
-static int ptp_mpipe_settime(struct ptp_clock_info *ptp,
- const struct timespec64 *ts)
-{
- int ret = 0;
- struct mpipe_data *md = container_of(ptp, struct mpipe_data, caps);
- mutex_lock(&md->ptp_lock);
- if (gxio_mpipe_set_timestamp(&md->context, ts))
- ret = -EBUSY;
- mutex_unlock(&md->ptp_lock);
- return ret;
-}
-
-static int ptp_mpipe_enable(struct ptp_clock_info *ptp,
- struct ptp_clock_request *request, int on)
-{
- return -EOPNOTSUPP;
-}
-
-static const struct ptp_clock_info ptp_mpipe_caps = {
- .owner = THIS_MODULE,
- .name = "mPIPE clock",
- .max_adj = 999999999,
- .n_ext_ts = 0,
- .n_pins = 0,
- .pps = 0,
- .adjfreq = ptp_mpipe_adjfreq,
- .adjtime = ptp_mpipe_adjtime,
- .gettime64 = ptp_mpipe_gettime,
- .settime64 = ptp_mpipe_settime,
- .enable = ptp_mpipe_enable,
-};
-
-/* Sync mPIPE's timestamp up with Linux system time and register PTP clock. */
-static void register_ptp_clock(struct net_device *dev, struct mpipe_data *md)
-{
- struct timespec64 ts;
-
- ktime_get_ts64(&ts);
- gxio_mpipe_set_timestamp(&md->context, &ts);
-
- mutex_init(&md->ptp_lock);
- md->caps = ptp_mpipe_caps;
- md->ptp_clock = ptp_clock_register(&md->caps, NULL);
- if (IS_ERR(md->ptp_clock))
- netdev_err(dev, "ptp_clock_register failed %ld\n",
- PTR_ERR(md->ptp_clock));
-}
-
-/* Initialize PTP fields in a new device. */
-static void init_ptp_dev(struct tile_net_priv *priv)
-{
- priv->stamp_cfg.rx_filter = HWTSTAMP_FILTER_NONE;
- priv->stamp_cfg.tx_type = HWTSTAMP_TX_OFF;
-}
-
-/* Helper functions for "tile_net_update()". */
-static void enable_ingress_irq(void *irq)
-{
- enable_percpu_irq((long)irq, 0);
-}
-
-static void disable_ingress_irq(void *irq)
-{
- disable_percpu_irq((long)irq);
-}
-
-/* Helper function for tile_net_open() and tile_net_stop().
- * Always called under tile_net_devs_for_channel_mutex.
- */
-static int tile_net_update(struct net_device *dev)
-{
- static gxio_mpipe_rules_t rules; /* too big to fit on the stack */
- bool saw_channel = false;
- int instance = mpipe_instance(dev);
- struct mpipe_data *md = &mpipe_data[instance];
- int channel;
- int rc;
- int cpu;
-
- saw_channel = false;
- gxio_mpipe_rules_init(&rules, &md->context);
-
- for (channel = 0; channel < TILE_NET_CHANNELS; channel++) {
- if (md->tile_net_devs_for_channel[channel] == NULL)
- continue;
- if (!saw_channel) {
- saw_channel = true;
- gxio_mpipe_rules_begin(&rules, md->first_bucket,
- md->num_buckets, NULL);
- gxio_mpipe_rules_set_headroom(&rules, NET_IP_ALIGN);
- }
- gxio_mpipe_rules_add_channel(&rules, channel);
- }
-
- /* NOTE: This can fail if there is no classifier.
- * ISSUE: Can anything else cause it to fail?
- */
- rc = gxio_mpipe_rules_commit(&rules);
- if (rc != 0) {
- netdev_warn(dev, "gxio_mpipe_rules_commit: mpipe[%d] %d\n",
- instance, rc);
- return -EIO;
- }
-
- /* Update all cpus, sequentially (to protect "netif_napi_add()").
- * We use on_each_cpu to handle the IPI mask or unmask.
- */
- if (!saw_channel)
- on_each_cpu(disable_ingress_irq,
- (void *)(long)(md->ingress_irq), 1);
- for_each_online_cpu(cpu) {
- struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
-
- if (!info->mpipe[instance].has_iqueue)
- continue;
- if (saw_channel) {
- if (!info->mpipe[instance].napi_added) {
- netif_napi_add(dev, &info->mpipe[instance].napi,
- tile_net_poll, TILE_NET_WEIGHT);
- info->mpipe[instance].napi_added = true;
- }
- if (!info->mpipe[instance].napi_enabled) {
- napi_enable(&info->mpipe[instance].napi);
- info->mpipe[instance].napi_enabled = true;
- }
- } else {
- if (info->mpipe[instance].napi_enabled) {
- napi_disable(&info->mpipe[instance].napi);
- info->mpipe[instance].napi_enabled = false;
- }
- /* FIXME: Drain the iqueue. */
- }
- }
- if (saw_channel)
- on_each_cpu(enable_ingress_irq,
- (void *)(long)(md->ingress_irq), 1);
-
- /* HACK: Allow packets to flow in the simulator. */
- if (saw_channel)
- sim_enable_mpipe_links(instance, -1);
-
- return 0;
-}
-
-/* Initialize a buffer stack. */
-static int create_buffer_stack(struct net_device *dev,
- int kind, size_t num_buffers)
-{
- pte_t hash_pte = pte_set_home((pte_t) { 0 }, PAGE_HOME_HASH);
- int instance = mpipe_instance(dev);
- struct mpipe_data *md = &mpipe_data[instance];
- size_t needed = gxio_mpipe_calc_buffer_stack_bytes(num_buffers);
- int stack_idx = md->first_buffer_stack + kind;
- void *va;
- int i, rc;
-
- /* Round up to 64KB and then use alloc_pages_exact() so we get the
- * required 64KB alignment.
- */
- md->buffer_stack_bytes[kind] =
- ALIGN(needed, 64 * 1024);
-
- va = alloc_pages_exact(md->buffer_stack_bytes[kind], GFP_KERNEL);
- if (va == NULL) {
- netdev_err(dev,
- "Could not alloc %zd bytes for buffer stack %d\n",
- md->buffer_stack_bytes[kind], kind);
- return -ENOMEM;
- }
-
- /* Initialize the buffer stack. */
- rc = gxio_mpipe_init_buffer_stack(&md->context, stack_idx,
- buffer_size_enums[kind], va,
- md->buffer_stack_bytes[kind], 0);
- if (rc != 0) {
- netdev_err(dev, "gxio_mpipe_init_buffer_stack: mpipe[%d] %d\n",
- instance, rc);
- free_pages_exact(va, md->buffer_stack_bytes[kind]);
- return rc;
- }
-
- md->buffer_stack_vas[kind] = va;
-
- rc = gxio_mpipe_register_client_memory(&md->context, stack_idx,
- hash_pte, 0);
- if (rc != 0) {
- netdev_err(dev,
- "gxio_mpipe_register_client_memory: mpipe[%d] %d\n",
- instance, rc);
- return rc;
- }
-
- /* Provide initial buffers. */
- for (i = 0; i < num_buffers; i++) {
- if (!tile_net_provide_buffer(instance, kind)) {
- netdev_err(dev, "Cannot allocate initial sk_bufs!\n");
- return -ENOMEM;
- }
- }
-
- return 0;
-}
-
-/* Allocate and initialize mpipe buffer stacks, and register them in
- * the mPIPE TLBs, for small, large, and (possibly) jumbo packet sizes.
- * This routine supports tile_net_init_mpipe(), below.
- */
-static int init_buffer_stacks(struct net_device *dev,
- int network_cpus_count)
-{
- int num_kinds = MAX_KINDS - (jumbo_num == 0);
- size_t num_buffers;
- int rc;
- int instance = mpipe_instance(dev);
- struct mpipe_data *md = &mpipe_data[instance];
-
- /* Allocate the buffer stacks. */
- rc = gxio_mpipe_alloc_buffer_stacks(&md->context, num_kinds, 0, 0);
- if (rc < 0) {
- netdev_err(dev,
- "gxio_mpipe_alloc_buffer_stacks: mpipe[%d] %d\n",
- instance, rc);
- return rc;
- }
- md->first_buffer_stack = rc;
-
- /* Enough small/large buffers to (normally) avoid buffer errors. */
- num_buffers =
- network_cpus_count * (IQUEUE_ENTRIES + TILE_NET_BATCH);
-
- /* Allocate the small memory stack. */
- if (rc >= 0)
- rc = create_buffer_stack(dev, 0, num_buffers);
-
- /* Allocate the large buffer stack. */
- if (rc >= 0)
- rc = create_buffer_stack(dev, 1, num_buffers);
-
- /* Allocate the jumbo buffer stack if needed. */
- if (rc >= 0 && jumbo_num != 0)
- rc = create_buffer_stack(dev, 2, jumbo_num);
-
- return rc;
-}
-
-/* Allocate per-cpu resources (memory for completions and idescs).
- * This routine supports tile_net_init_mpipe(), below.
- */
-static int alloc_percpu_mpipe_resources(struct net_device *dev,
- int cpu, int ring)
-{
- struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
- int order, i, rc;
- int instance = mpipe_instance(dev);
- struct mpipe_data *md = &mpipe_data[instance];
- struct page *page;
- void *addr;
-
- /* Allocate the "comps". */
- order = get_order(COMPS_SIZE);
- page = homecache_alloc_pages(GFP_KERNEL, order, cpu);
- if (page == NULL) {
- netdev_err(dev, "Failed to alloc %zd bytes comps memory\n",
- COMPS_SIZE);
- return -ENOMEM;
- }
- addr = pfn_to_kaddr(page_to_pfn(page));
- memset(addr, 0, COMPS_SIZE);
- for (i = 0; i < TILE_NET_CHANNELS; i++)
- info->mpipe[instance].comps_for_echannel[i] =
- addr + i * sizeof(struct tile_net_comps);
-
- /* If this is a network cpu, create an iqueue. */
- if (cpumask_test_cpu(cpu, &network_cpus_map)) {
- order = get_order(NOTIF_RING_SIZE);
- page = homecache_alloc_pages(GFP_KERNEL, order, cpu);
- if (page == NULL) {
- netdev_err(dev,
- "Failed to alloc %zd bytes iqueue memory\n",
- NOTIF_RING_SIZE);
- return -ENOMEM;
- }
- addr = pfn_to_kaddr(page_to_pfn(page));
- rc = gxio_mpipe_iqueue_init(&info->mpipe[instance].iqueue,
- &md->context, ring++, addr,
- NOTIF_RING_SIZE, 0);
- if (rc < 0) {
- netdev_err(dev,
- "gxio_mpipe_iqueue_init failed: %d\n", rc);
- return rc;
- }
- info->mpipe[instance].has_iqueue = true;
- }
-
- return ring;
-}
-
-/* Initialize NotifGroup and buckets.
- * This routine supports tile_net_init_mpipe(), below.
- */
-static int init_notif_group_and_buckets(struct net_device *dev,
- int ring, int network_cpus_count)
-{
- int group, rc;
- int instance = mpipe_instance(dev);
- struct mpipe_data *md = &mpipe_data[instance];
-
- /* Allocate one NotifGroup. */
- rc = gxio_mpipe_alloc_notif_groups(&md->context, 1, 0, 0);
- if (rc < 0) {
- netdev_err(dev, "gxio_mpipe_alloc_notif_groups: mpipe[%d] %d\n",
- instance, rc);
- return rc;
- }
- group = rc;
-
- /* Initialize global num_buckets value. */
- if (network_cpus_count > 4)
- md->num_buckets = 256;
- else if (network_cpus_count > 1)
- md->num_buckets = 16;
-
- /* Allocate some buckets, and set global first_bucket value. */
- rc = gxio_mpipe_alloc_buckets(&md->context, md->num_buckets, 0, 0);
- if (rc < 0) {
- netdev_err(dev, "gxio_mpipe_alloc_buckets: mpipe[%d] %d\n",
- instance, rc);
- return rc;
- }
- md->first_bucket = rc;
-
- /* Init group and buckets. */
- rc = gxio_mpipe_init_notif_group_and_buckets(
- &md->context, group, ring, network_cpus_count,
- md->first_bucket, md->num_buckets,
- GXIO_MPIPE_BUCKET_STICKY_FLOW_LOCALITY);
- if (rc != 0) {
- netdev_err(dev, "gxio_mpipe_init_notif_group_and_buckets: "
- "mpipe[%d] %d\n", instance, rc);
- return rc;
- }
-
- return 0;
-}
-
-/* Create an irq and register it, then activate the irq and request
- * interrupts on all cores. Note that "ingress_irq" being initialized
- * is how we know not to call tile_net_init_mpipe() again.
- * This routine supports tile_net_init_mpipe(), below.
- */
-static int tile_net_setup_interrupts(struct net_device *dev)
-{
- int cpu, rc, irq;
- int instance = mpipe_instance(dev);
- struct mpipe_data *md = &mpipe_data[instance];
-
- irq = md->ingress_irq;
- if (irq < 0) {
- irq = irq_alloc_hwirq(-1);
- if (!irq) {
- netdev_err(dev,
- "create_irq failed: mpipe[%d] %d\n",
- instance, irq);
- return irq;
- }
- tile_irq_activate(irq, TILE_IRQ_PERCPU);
-
- rc = request_irq(irq, tile_net_handle_ingress_irq,
- 0, "tile_net", (void *)((uint64_t)instance));
-
- if (rc != 0) {
- netdev_err(dev, "request_irq failed: mpipe[%d] %d\n",
- instance, rc);
- irq_free_hwirq(irq);
- return rc;
- }
- md->ingress_irq = irq;
- }
-
- for_each_online_cpu(cpu) {
- struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
- if (info->mpipe[instance].has_iqueue) {
- gxio_mpipe_request_notif_ring_interrupt(&md->context,
- cpu_x(cpu), cpu_y(cpu), KERNEL_PL, irq,
- info->mpipe[instance].iqueue.ring);
- }
- }
-
- return 0;
-}
-
-/* Undo any state set up partially by a failed call to tile_net_init_mpipe. */
-static void tile_net_init_mpipe_fail(int instance)
-{
- int kind, cpu;
- struct mpipe_data *md = &mpipe_data[instance];
-
- /* Do cleanups that require the mpipe context first. */
- for (kind = 0; kind < MAX_KINDS; kind++) {
- if (md->buffer_stack_vas[kind] != NULL) {
- tile_net_pop_all_buffers(instance,
- md->first_buffer_stack +
- kind);
- }
- }
-
- /* Destroy mpipe context so the hardware no longer owns any memory. */
- gxio_mpipe_destroy(&md->context);
-
- for_each_online_cpu(cpu) {
- struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
- free_pages(
- (unsigned long)(
- info->mpipe[instance].comps_for_echannel[0]),
- get_order(COMPS_SIZE));
- info->mpipe[instance].comps_for_echannel[0] = NULL;
- free_pages((unsigned long)(info->mpipe[instance].iqueue.idescs),
- get_order(NOTIF_RING_SIZE));
- info->mpipe[instance].iqueue.idescs = NULL;
- }
-
- for (kind = 0; kind < MAX_KINDS; kind++) {
- if (md->buffer_stack_vas[kind] != NULL) {
- free_pages_exact(md->buffer_stack_vas[kind],
- md->buffer_stack_bytes[kind]);
- md->buffer_stack_vas[kind] = NULL;
- }
- }
-
- md->first_buffer_stack = -1;
- md->first_bucket = -1;
-}
-
-/* The first time any tilegx network device is opened, we initialize
- * the global mpipe state. If this step fails, we fail to open the
- * device, but if it succeeds, we never need to do it again, and since
- * tile_net can't be unloaded, we never undo it.
- *
- * Note that some resources in this path (buffer stack indices,
- * bindings from init_buffer_stack, etc.) are hypervisor resources
- * that are freed implicitly by gxio_mpipe_destroy().
- */
-static int tile_net_init_mpipe(struct net_device *dev)
-{
- int rc;
- int cpu;
- int first_ring, ring;
- int instance = mpipe_instance(dev);
- struct mpipe_data *md = &mpipe_data[instance];
- int network_cpus_count = cpumask_weight(&network_cpus_map);
-
- if (!hash_default) {
- netdev_err(dev, "Networking requires hash_default!\n");
- return -EIO;
- }
-
- rc = gxio_mpipe_init(&md->context, instance);
- if (rc != 0) {
- netdev_err(dev, "gxio_mpipe_init: mpipe[%d] %d\n",
- instance, rc);
- return -EIO;
- }
-
- /* Set up the buffer stacks. */
- rc = init_buffer_stacks(dev, network_cpus_count);
- if (rc != 0)
- goto fail;
-
- /* Allocate one NotifRing for each network cpu. */
- rc = gxio_mpipe_alloc_notif_rings(&md->context,
- network_cpus_count, 0, 0);
- if (rc < 0) {
- netdev_err(dev, "gxio_mpipe_alloc_notif_rings failed %d\n",
- rc);
- goto fail;
- }
-
- /* Init NotifRings per-cpu. */
- first_ring = rc;
- ring = first_ring;
- for_each_online_cpu(cpu) {
- rc = alloc_percpu_mpipe_resources(dev, cpu, ring);
- if (rc < 0)
- goto fail;
- ring = rc;
- }
-
- /* Initialize NotifGroup and buckets. */
- rc = init_notif_group_and_buckets(dev, first_ring, network_cpus_count);
- if (rc != 0)
- goto fail;
-
- /* Create and enable interrupts. */
- rc = tile_net_setup_interrupts(dev);
- if (rc != 0)
- goto fail;
-
- /* Register PTP clock and set mPIPE timestamp, if configured. */
- register_ptp_clock(dev, md);
-
- return 0;
-
-fail:
- tile_net_init_mpipe_fail(instance);
- return rc;
-}
-
-/* Create persistent egress info for a given egress channel.
- * Note that this may be shared between, say, "gbe0" and "xgbe0".
- * ISSUE: Defer header allocation until TSO is actually needed?
- */
-static int tile_net_init_egress(struct net_device *dev, int echannel)
-{
- static int ering = -1;
- struct page *headers_page, *edescs_page, *equeue_page;
- gxio_mpipe_edesc_t *edescs;
- gxio_mpipe_equeue_t *equeue;
- unsigned char *headers;
- int headers_order, edescs_order, equeue_order;
- size_t edescs_size;
- int rc = -ENOMEM;
- int instance = mpipe_instance(dev);
- struct mpipe_data *md = &mpipe_data[instance];
-
- /* Only initialize once. */
- if (md->egress_for_echannel[echannel].equeue != NULL)
- return 0;
-
- /* Allocate memory for the "headers". */
- headers_order = get_order(EQUEUE_ENTRIES * HEADER_BYTES);
- headers_page = alloc_pages(GFP_KERNEL, headers_order);
- if (headers_page == NULL) {
- netdev_warn(dev,
- "Could not alloc %zd bytes for TSO headers.\n",
- PAGE_SIZE << headers_order);
- goto fail;
- }
- headers = pfn_to_kaddr(page_to_pfn(headers_page));
-
- /* Allocate memory for the "edescs". */
- edescs_size = EQUEUE_ENTRIES * sizeof(*edescs);
- edescs_order = get_order(edescs_size);
- edescs_page = alloc_pages(GFP_KERNEL, edescs_order);
- if (edescs_page == NULL) {
- netdev_warn(dev,
- "Could not alloc %zd bytes for eDMA ring.\n",
- edescs_size);
- goto fail_headers;
- }
- edescs = pfn_to_kaddr(page_to_pfn(edescs_page));
-
- /* Allocate memory for the "equeue". */
- equeue_order = get_order(sizeof(*equeue));
- equeue_page = alloc_pages(GFP_KERNEL, equeue_order);
- if (equeue_page == NULL) {
- netdev_warn(dev,
- "Could not alloc %zd bytes for equeue info.\n",
- PAGE_SIZE << equeue_order);
- goto fail_edescs;
- }
- equeue = pfn_to_kaddr(page_to_pfn(equeue_page));
-
- /* Allocate an edma ring (using a one entry "free list"). */
- if (ering < 0) {
- rc = gxio_mpipe_alloc_edma_rings(&md->context, 1, 0, 0);
- if (rc < 0) {
- netdev_warn(dev, "gxio_mpipe_alloc_edma_rings: "
- "mpipe[%d] %d\n", instance, rc);
- goto fail_equeue;
- }
- ering = rc;
- }
-
- /* Initialize the equeue. */
- rc = gxio_mpipe_equeue_init(equeue, &md->context, ering, echannel,
- edescs, edescs_size, 0);
- if (rc != 0) {
- netdev_err(dev, "gxio_mpipe_equeue_init: mpipe[%d] %d\n",
- instance, rc);
- goto fail_equeue;
- }
-
- /* Don't reuse the ering later. */
- ering = -1;
-
- if (jumbo_num != 0) {
- /* Make sure "jumbo" packets can be egressed safely. */
- if (gxio_mpipe_equeue_set_snf_size(equeue, 10368) < 0) {
- /* ISSUE: There is no "gxio_mpipe_equeue_destroy()". */
- netdev_warn(dev, "Jumbo packets may not be egressed"
- " properly on channel %d\n", echannel);
- }
- }
-
- /* Done. */
- md->egress_for_echannel[echannel].equeue = equeue;
- md->egress_for_echannel[echannel].headers = headers;
- return 0;
-
-fail_equeue:
- __free_pages(equeue_page, equeue_order);
-
-fail_edescs:
- __free_pages(edescs_page, edescs_order);
-
-fail_headers:
- __free_pages(headers_page, headers_order);
-
-fail:
- return rc;
-}
-
-/* Return channel number for a newly-opened link. */
-static int tile_net_link_open(struct net_device *dev, gxio_mpipe_link_t *link,
- const char *link_name)
-{
- int instance = mpipe_instance(dev);
- struct mpipe_data *md = &mpipe_data[instance];
- int rc = gxio_mpipe_link_open(link, &md->context, link_name, 0);
- if (rc < 0) {
- netdev_err(dev, "Failed to open '%s', mpipe[%d], %d\n",
- link_name, instance, rc);
- return rc;
- }
- if (jumbo_num != 0) {
- u32 attr = GXIO_MPIPE_LINK_RECEIVE_JUMBO;
- rc = gxio_mpipe_link_set_attr(link, attr, 1);
- if (rc != 0) {
- netdev_err(dev,
- "Cannot receive jumbo packets on '%s'\n",
- link_name);
- gxio_mpipe_link_close(link);
- return rc;
- }
- }
- rc = gxio_mpipe_link_channel(link);
- if (rc < 0 || rc >= TILE_NET_CHANNELS) {
- netdev_err(dev, "gxio_mpipe_link_channel bad value: %d\n", rc);
- gxio_mpipe_link_close(link);
- return -EINVAL;
- }
- return rc;
-}
-
-/* Help the kernel activate the given network interface. */
-static int tile_net_open(struct net_device *dev)
-{
- struct tile_net_priv *priv = netdev_priv(dev);
- int cpu, rc, instance;
-
- mutex_lock(&tile_net_devs_for_channel_mutex);
-
- /* Get the instance info. */
- rc = gxio_mpipe_link_instance(dev->name);
- if (rc < 0 || rc >= NR_MPIPE_MAX) {
- mutex_unlock(&tile_net_devs_for_channel_mutex);
- return -EIO;
- }
-
- priv->instance = rc;
- instance = rc;
- if (!mpipe_data[rc].context.mmio_fast_base) {
- /* Do one-time initialization per instance the first time
- * any device is opened.
- */
- rc = tile_net_init_mpipe(dev);
- if (rc != 0)
- goto fail;
- }
-
- /* Determine if this is the "loopify" device. */
- if (unlikely((loopify_link_name != NULL) &&
- !strcmp(dev->name, loopify_link_name))) {
- rc = tile_net_link_open(dev, &priv->link, "loop0");
- if (rc < 0)
- goto fail;
- priv->channel = rc;
- rc = tile_net_link_open(dev, &priv->loopify_link, "loop1");
- if (rc < 0)
- goto fail;
- priv->loopify_channel = rc;
- priv->echannel = rc;
- } else {
- rc = tile_net_link_open(dev, &priv->link, dev->name);
- if (rc < 0)
- goto fail;
- priv->channel = rc;
- priv->echannel = rc;
- }
-
- /* Initialize egress info (if needed). Once ever, per echannel. */
- rc = tile_net_init_egress(dev, priv->echannel);
- if (rc != 0)
- goto fail;
-
- mpipe_data[instance].tile_net_devs_for_channel[priv->channel] = dev;
-
- rc = tile_net_update(dev);
- if (rc != 0)
- goto fail;
-
- mutex_unlock(&tile_net_devs_for_channel_mutex);
-
- /* Initialize the transmit wake timer for this device for each cpu. */
- for_each_online_cpu(cpu) {
- struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
- struct tile_net_tx_wake *tx_wake =
- &info->mpipe[instance].tx_wake[priv->echannel];
-
- hrtimer_init(&tx_wake->timer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL);
- tx_wake->tx_queue_idx = cpu;
- tx_wake->timer.function = tile_net_handle_tx_wake_timer;
- tx_wake->dev = dev;
- }
-
- for_each_online_cpu(cpu)
- netif_start_subqueue(dev, cpu);
- netif_carrier_on(dev);
- return 0;
-
-fail:
- if (priv->loopify_channel >= 0) {
- if (gxio_mpipe_link_close(&priv->loopify_link) != 0)
- netdev_warn(dev, "Failed to close loopify link!\n");
- priv->loopify_channel = -1;
- }
- if (priv->channel >= 0) {
- /* Clear any channel mapping made above before forgetting the
- * channel, so the array is never indexed with -1.
- */
- mpipe_data[instance].tile_net_devs_for_channel[priv->channel] =
- NULL;
- if (gxio_mpipe_link_close(&priv->link) != 0)
- netdev_warn(dev, "Failed to close link!\n");
- priv->channel = -1;
- }
- priv->echannel = -1;
- mutex_unlock(&tile_net_devs_for_channel_mutex);
-
- /* Don't return raw gxio error codes to generic Linux. */
- return (rc > -512) ? rc : -EIO;
-}
-
-/* Help the kernel deactivate the given network interface. */
-static int tile_net_stop(struct net_device *dev)
-{
- struct tile_net_priv *priv = netdev_priv(dev);
- int cpu;
- int instance = priv->instance;
- struct mpipe_data *md = &mpipe_data[instance];
-
- for_each_online_cpu(cpu) {
- struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
- struct tile_net_tx_wake *tx_wake =
- &info->mpipe[instance].tx_wake[priv->echannel];
-
- hrtimer_cancel(&tx_wake->timer);
- netif_stop_subqueue(dev, cpu);
- }
-
- mutex_lock(&tile_net_devs_for_channel_mutex);
- md->tile_net_devs_for_channel[priv->channel] = NULL;
- (void)tile_net_update(dev);
- if (priv->loopify_channel >= 0) {
- if (gxio_mpipe_link_close(&priv->loopify_link) != 0)
- netdev_warn(dev, "Failed to close loopify link!\n");
- priv->loopify_channel = -1;
- }
- if (priv->channel >= 0) {
- if (gxio_mpipe_link_close(&priv->link) != 0)
- netdev_warn(dev, "Failed to close link!\n");
- priv->channel = -1;
- }
- priv->echannel = -1;
- mutex_unlock(&tile_net_devs_for_channel_mutex);
-
- return 0;
-}
-
-/* Determine the VA for a fragment. */
-static inline void *tile_net_frag_buf(skb_frag_t *f)
-{
- unsigned long pfn = page_to_pfn(skb_frag_page(f));
- return pfn_to_kaddr(pfn) + f->page_offset;
-}
-
-/* Acquire a completion entry and an egress slot, or if we can't,
- * stop the queue and schedule the tx_wake timer.
- */
-static s64 tile_net_equeue_try_reserve(struct net_device *dev,
- int tx_queue_idx,
- struct tile_net_comps *comps,
- gxio_mpipe_equeue_t *equeue,
- int num_edescs)
-{
- /* Try to acquire a completion entry. */
- if (comps->comp_next - comps->comp_last < TILE_NET_MAX_COMPS - 1 ||
- tile_net_free_comps(equeue, comps, 32, false) != 0) {
-
- /* Try to acquire an egress slot. */
- s64 slot = gxio_mpipe_equeue_try_reserve(equeue, num_edescs);
- if (slot >= 0)
- return slot;
-
- /* Freeing some completions gives the equeue time to drain. */
- tile_net_free_comps(equeue, comps, TILE_NET_MAX_COMPS, false);
-
- slot = gxio_mpipe_equeue_try_reserve(equeue, num_edescs);
- if (slot >= 0)
- return slot;
- }
-
- /* Still nothing; give up and stop the queue for a short while. */
- netif_stop_subqueue(dev, tx_queue_idx);
- tile_net_schedule_tx_wake_timer(dev, tx_queue_idx);
- return -1;
-}
-
-/* Determine how many edesc's are needed for TSO.
- *
- * Sometimes, if "sendfile()" requires copying, we will be called with
- * "data" containing the header and payload, with "frags" being empty.
- * Sometimes, for example when using NFS over TCP, a single segment can
- * span 3 fragments. This requires special care.
- */
-static int tso_count_edescs(struct sk_buff *skb)
-{
- struct skb_shared_info *sh = skb_shinfo(skb);
- unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
- unsigned int data_len = skb->len - sh_len;
- unsigned int p_len = sh->gso_size;
- long f_id = -1; /* id of the current fragment */
- long f_size = skb_headlen(skb) - sh_len; /* current fragment size */
- long f_used = 0; /* bytes used from the current fragment */
- long n; /* size of the current piece of payload */
- int num_edescs = 0;
- int segment;
-
- for (segment = 0; segment < sh->gso_segs; segment++) {
-
- unsigned int p_used = 0;
-
- /* One edesc for header and for each piece of the payload. */
- for (num_edescs++; p_used < p_len; num_edescs++) {
-
- /* Advance as needed. */
- while (f_used >= f_size) {
- f_id++;
- f_size = skb_frag_size(&sh->frags[f_id]);
- f_used = 0;
- }
-
- /* Use bytes from the current fragment. */
- n = p_len - p_used;
- if (n > f_size - f_used)
- n = f_size - f_used;
- f_used += n;
- p_used += n;
- }
-
- /* The last segment may be less than gso_size. */
- data_len -= p_len;
- if (data_len < p_len)
- p_len = data_len;
- }
-
- return num_edescs;
-}
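-
-/* Worked example for the loop above (illustrative sizes, assuming the
- * linear area holds only the headers): with gso_size 1400 and payload
- * spread across three frags of 700, 100, and 700 bytes, the first
- * segment consumes all of frag 0, all of frag 1, and 600 bytes of
- * frag 2 (one header edesc plus three payload edescs), and the final
- * 100-byte segment consumes the rest of frag 2 (one header edesc plus
- * one payload edesc), so num_edescs == 6.
- */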
-
-/* Prepare modified copies of the skbuff headers. */
-static void tso_headers_prepare(struct sk_buff *skb, unsigned char *headers,
- s64 slot)
-{
- struct skb_shared_info *sh = skb_shinfo(skb);
- struct iphdr *ih;
- struct ipv6hdr *ih6;
- struct tcphdr *th;
- unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
- unsigned int data_len = skb->len - sh_len;
- unsigned char *data = skb->data;
- unsigned int ih_off, th_off, p_len;
- unsigned int isum_seed, tsum_seed, seq;
- unsigned int uninitialized_var(id);
- int is_ipv6;
- long f_id = -1; /* id of the current fragment */
- long f_size = skb_headlen(skb) - sh_len; /* current fragment size */
- long f_used = 0; /* bytes used from the current fragment */
- long n; /* size of the current piece of payload */
- int segment;
-
- /* Locate original headers and compute various lengths. */
- is_ipv6 = skb_is_gso_v6(skb);
- if (is_ipv6) {
- ih6 = ipv6_hdr(skb);
- ih_off = skb_network_offset(skb);
- } else {
- ih = ip_hdr(skb);
- ih_off = skb_network_offset(skb);
- isum_seed = ((0xFFFF - ih->check) +
- (0xFFFF - ih->tot_len) +
- (0xFFFF - ih->id));
- id = ntohs(ih->id);
- }
-
- th = tcp_hdr(skb);
- th_off = skb_transport_offset(skb);
- p_len = sh->gso_size;
-
- tsum_seed = th->check + (0xFFFF ^ htons(skb->len));
- seq = ntohl(th->seq);
-
- /* Prepare all the headers. */
- for (segment = 0; segment < sh->gso_segs; segment++) {
- unsigned char *buf;
- unsigned int p_used = 0;
-
- /* Copy to the header memory for this segment. */
- buf = headers + (slot % EQUEUE_ENTRIES) * HEADER_BYTES +
- NET_IP_ALIGN;
- memcpy(buf, data, sh_len);
-
- /* Update copied ip header. */
- if (is_ipv6) {
- ih6 = (struct ipv6hdr *)(buf + ih_off);
- ih6->payload_len = htons(sh_len + p_len - ih_off -
- sizeof(*ih6));
- } else {
- ih = (struct iphdr *)(buf + ih_off);
- ih->tot_len = htons(sh_len + p_len - ih_off);
- ih->id = htons(id++);
- ih->check = csum_long(isum_seed + ih->tot_len +
- ih->id) ^ 0xffff;
- }
-
- /* Update copied tcp header. */
- th = (struct tcphdr *)(buf + th_off);
- th->seq = htonl(seq);
- th->check = csum_long(tsum_seed + htons(sh_len + p_len));
- if (segment != sh->gso_segs - 1) {
- th->fin = 0;
- th->psh = 0;
- }
-
- /* Skip past the header. */
- slot++;
-
- /* Skip past the payload. */
- while (p_used < p_len) {
-
- /* Advance as needed. */
- while (f_used >= f_size) {
- f_id++;
- f_size = skb_frag_size(&sh->frags[f_id]);
- f_used = 0;
- }
-
- /* Use bytes from the current fragment. */
- n = p_len - p_used;
- if (n > f_size - f_used)
- n = f_size - f_used;
- f_used += n;
- p_used += n;
-
- slot++;
- }
-
- seq += p_len;
-
- /* The last segment may be less than gso_size. */
- data_len -= p_len;
- if (data_len < p_len)
- p_len = data_len;
- }
-
- /* Flush the headers so they are ready for hardware DMA. */
- wmb();
-}
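-
-/* The seed arithmetic above is incremental ones-complement checksum
- * folding in the style of RFC 1624: each "0xFFFF - x" (or "0xFFFF ^ x")
- * term subtracts a stale 16-bit field from the stored checksum, and
- * csum_long() folds the carries back into 16 bits. A minimal sketch of
- * the same update for one changed field (helper name invented here for
- * illustration; it is not part of this driver):
- */
-static inline u16 tso_csum_replace16(u16 old_check, u16 old_val, u16 new_val)
-{
- /* RFC 1624, Eqn. 3: HC' = ~(~HC + ~m + m'). */
- u32 sum = (u16)~old_check + (u16)~old_val + new_val;
-
- /* Fold the 32-bit sum back into 16 bits (twice handles all carries). */
- sum = (sum & 0xffff) + (sum >> 16);
- sum = (sum & 0xffff) + (sum >> 16);
- return (u16)~sum;
-}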
-
-/* Pass all the data to mpipe for egress. */
-static void tso_egress(struct net_device *dev, gxio_mpipe_equeue_t *equeue,
- struct sk_buff *skb, unsigned char *headers, s64 slot)
-{
- struct skb_shared_info *sh = skb_shinfo(skb);
- int instance = mpipe_instance(dev);
- struct mpipe_data *md = &mpipe_data[instance];
- unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
- unsigned int data_len = skb->len - sh_len;
- unsigned int p_len = sh->gso_size;
- gxio_mpipe_edesc_t edesc_head = { { 0 } };
- gxio_mpipe_edesc_t edesc_body = { { 0 } };
- long f_id = -1; /* id of the current fragment */
- long f_size = skb_headlen(skb) - sh_len; /* current fragment size */
- long f_used = 0; /* bytes used from the current fragment */
- void *f_data = skb->data + sh_len;
- long n; /* size of the current piece of payload */
- unsigned long tx_packets = 0, tx_bytes = 0;
- unsigned int csum_start;
- int segment;
-
- /* Prepare to egress the headers: set up header edesc. */
- csum_start = skb_checksum_start_offset(skb);
- edesc_head.csum = 1;
- edesc_head.csum_start = csum_start;
- edesc_head.csum_dest = csum_start + skb->csum_offset;
- edesc_head.xfer_size = sh_len;
-
- /* This is only used to specify the TLB. */
- edesc_head.stack_idx = md->first_buffer_stack;
- edesc_body.stack_idx = md->first_buffer_stack;
-
- /* Egress all the edescs. */
- for (segment = 0; segment < sh->gso_segs; segment++) {
- unsigned char *buf;
- unsigned int p_used = 0;
-
- /* Egress the header. */
- buf = headers + (slot % EQUEUE_ENTRIES) * HEADER_BYTES +
- NET_IP_ALIGN;
- edesc_head.va = va_to_tile_io_addr(buf);
- gxio_mpipe_equeue_put_at(equeue, edesc_head, slot);
- slot++;
-
- /* Egress the payload. */
- while (p_used < p_len) {
- void *va;
-
- /* Advance as needed. */
- while (f_used >= f_size) {
- f_id++;
- f_size = skb_frag_size(&sh->frags[f_id]);
- f_data = tile_net_frag_buf(&sh->frags[f_id]);
- f_used = 0;
- }
-
- va = f_data + f_used;
-
- /* Use bytes from the current fragment. */
- n = p_len - p_used;
- if (n > f_size - f_used)
- n = f_size - f_used;
- f_used += n;
- p_used += n;
-
- /* Egress a piece of the payload. */
- edesc_body.va = va_to_tile_io_addr(va);
- edesc_body.xfer_size = n;
- edesc_body.bound = !(p_used < p_len);
- gxio_mpipe_equeue_put_at(equeue, edesc_body, slot);
- slot++;
- }
-
- tx_packets++;
- tx_bytes += sh_len + p_len;
-
- /* The last segment may be less than gso_size. */
- data_len -= p_len;
- if (data_len < p_len)
- p_len = data_len;
- }
-
- /* Update stats. */
- tile_net_stats_add(tx_packets, &dev->stats.tx_packets);
- tile_net_stats_add(tx_bytes, &dev->stats.tx_bytes);
-}
-
-/* Do "TSO" handling for egress.
- *
- * Normally drivers set NETIF_F_TSO only to support hardware TSO;
- * otherwise the stack uses scatter-gather to implement GSO in software.
- * In our testing, enabling GSO support (via NETIF_F_SG) drops network
- * performance down to around 7.5 Gbps on the 10G interfaces, while
- * also dropping cpu utilization way down, to under 8%. But
- * implementing "TSO" in the driver brings performance back up to line
- * rate, while dropping cpu usage even further, to less than 4%. In
- * practice, profiling of GSO shows that skb_segment() is what causes
- * the performance overheads; we benefit in the driver from using
- * preallocated memory to duplicate the TCP/IP headers.
- */
-static int tile_net_tx_tso(struct sk_buff *skb, struct net_device *dev)
-{
- struct tile_net_info *info = this_cpu_ptr(&per_cpu_info);
- struct tile_net_priv *priv = netdev_priv(dev);
- int channel = priv->echannel;
- int instance = priv->instance;
- struct mpipe_data *md = &mpipe_data[instance];
- struct tile_net_egress *egress = &md->egress_for_echannel[channel];
- struct tile_net_comps *comps =
- info->mpipe[instance].comps_for_echannel[channel];
- gxio_mpipe_equeue_t *equeue = egress->equeue;
- unsigned long irqflags;
- int num_edescs;
- s64 slot;
-
- /* Determine how many mpipe edesc's are needed. */
- num_edescs = tso_count_edescs(skb);
-
- local_irq_save(irqflags);
-
- /* Try to acquire a completion entry and an egress slot. */
- slot = tile_net_equeue_try_reserve(dev, skb->queue_mapping, comps,
- equeue, num_edescs);
- if (slot < 0) {
- local_irq_restore(irqflags);
- return NETDEV_TX_BUSY;
- }
-
- /* Set up copies of header data properly. */
- tso_headers_prepare(skb, egress->headers, slot);
-
- /* Actually pass the data to the network hardware. */
- tso_egress(dev, equeue, skb, egress->headers, slot);
-
- /* Add a completion record. */
- add_comp(equeue, comps, slot + num_edescs - 1, skb);
-
- local_irq_restore(irqflags);
-
- /* Make sure the egress timer is scheduled. */
- tile_net_schedule_egress_timer();
-
- return NETDEV_TX_OK;
-}
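-
-/* Rough illustration of the trade-off described above tile_net_tx_tso()
- * (assumed numbers, not measurements): a 64KB GSO skb with gso_size
- * 1448 and a 66-byte TCP/IPv4 header yields about 45 segments, so the
- * path above writes ~45 small header copies into the preallocated
- * "egress->headers" area and posts roughly 90-135 edescs, instead of
- * having skb_segment() allocate and fill ~45 fresh skbs as software
- * GSO would.
- */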
-
-/* Analyze the body and frags for a transmit request. */
-static unsigned int tile_net_tx_frags(struct frag *frags,
- struct sk_buff *skb,
- void *b_data, unsigned int b_len)
-{
- unsigned int i, n = 0;
-
- struct skb_shared_info *sh = skb_shinfo(skb);
-
- if (b_len != 0) {
- frags[n].buf = b_data;
- frags[n++].length = b_len;
- }
-
- for (i = 0; i < sh->nr_frags; i++) {
- skb_frag_t *f = &sh->frags[i];
- frags[n].buf = tile_net_frag_buf(f);
- frags[n++].length = skb_frag_size(f);
- }
-
- return n;
-}
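-
-/* Example: an skb with a non-empty linear area and two page frags
- * yields n == 3 here (frags[0] for the linear bytes, frags[1..2] for
- * the pages), which is why the frags[MAX_FRAGS] arrays in
- * tile_net_tx() must allow one entry beyond the page frags.
- */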
-
-/* Help the kernel transmit a packet. */
-static int tile_net_tx(struct sk_buff *skb, struct net_device *dev)
-{
- struct tile_net_info *info = this_cpu_ptr(&per_cpu_info);
- struct tile_net_priv *priv = netdev_priv(dev);
- int instance = priv->instance;
- struct mpipe_data *md = &mpipe_data[instance];
- struct tile_net_egress *egress =
- &md->egress_for_echannel[priv->echannel];
- gxio_mpipe_equeue_t *equeue = egress->equeue;
- struct tile_net_comps *comps =
- info->mpipe[instance].comps_for_echannel[priv->echannel];
- unsigned int len = skb->len;
- unsigned char *data = skb->data;
- unsigned int num_edescs;
- struct frag frags[MAX_FRAGS];
- gxio_mpipe_edesc_t edescs[MAX_FRAGS];
- unsigned long irqflags;
- gxio_mpipe_edesc_t edesc = { { 0 } };
- unsigned int i;
- s64 slot;
-
- if (skb_is_gso(skb))
- return tile_net_tx_tso(skb, dev);
-
- num_edescs = tile_net_tx_frags(frags, skb, data, skb_headlen(skb));
-
- /* This is only used to specify the TLB. */
- edesc.stack_idx = md->first_buffer_stack;
-
- /* Prepare the edescs. */
- for (i = 0; i < num_edescs; i++) {
- edesc.xfer_size = frags[i].length;
- edesc.va = va_to_tile_io_addr(frags[i].buf);
- edescs[i] = edesc;
- }
-
- /* Mark the final edesc. */
- edescs[num_edescs - 1].bound = 1;
-
- /* Add checksum info to the initial edesc, if needed. */
- if (skb->ip_summed == CHECKSUM_PARTIAL) {
- unsigned int csum_start = skb_checksum_start_offset(skb);
- edescs[0].csum = 1;
- edescs[0].csum_start = csum_start;
- edescs[0].csum_dest = csum_start + skb->csum_offset;
- }
-
- local_irq_save(irqflags);
-
- /* Try to acquire a completion entry and an egress slot. */
- slot = tile_net_equeue_try_reserve(dev, skb->queue_mapping, comps,
- equeue, num_edescs);
- if (slot < 0) {
- local_irq_restore(irqflags);
- return NETDEV_TX_BUSY;
- }
-
- for (i = 0; i < num_edescs; i++)
- gxio_mpipe_equeue_put_at(equeue, edescs[i], slot++);
-
- /* Store TX timestamp if needed. */
- tile_tx_timestamp(skb, instance);
-
- /* Add a completion record. */
- add_comp(equeue, comps, slot - 1, skb);
-
- /* NOTE: Use ETH_ZLEN for short packets (e.g. 42 < 60). */
- tile_net_stats_add(1, &dev->stats.tx_packets);
- tile_net_stats_add(max_t(unsigned int, len, ETH_ZLEN),
- &dev->stats.tx_bytes);
-
- local_irq_restore(irqflags);
-
- /* Make sure the egress timer is scheduled. */
- tile_net_schedule_egress_timer();
-
- return NETDEV_TX_OK;
-}
-
-/* Return subqueue id on this core (one per core). */
-static u16 tile_net_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv, select_queue_fallback_t fallback)
-{
- return smp_processor_id();
-}
-
-/* Deal with a transmit timeout. */
-static void tile_net_tx_timeout(struct net_device *dev)
-{
- int cpu;
-
- for_each_online_cpu(cpu)
- netif_wake_subqueue(dev, cpu);
-}
-
-/* Ioctl commands. */
-static int tile_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-{
- if (cmd == SIOCSHWTSTAMP)
- return tile_hwtstamp_set(dev, rq);
- if (cmd == SIOCGHWTSTAMP)
- return tile_hwtstamp_get(dev, rq);
-
- return -EOPNOTSUPP;
-}
-
-/* Change the Ethernet address of the NIC.
- *
- * The hypervisor driver does not support changing the MAC address. However,
- * the hardware does not do anything with the MAC address, so the address
- * which gets used on outgoing packets, and which is accepted on incoming
- * packets, is completely up to us.
- *
- * Returns 0 on success, negative on failure.
- */
-static int tile_net_set_mac_address(struct net_device *dev, void *p)
-{
- struct sockaddr *addr = p;
-
- if (!is_valid_ether_addr(addr->sa_data))
- return -EINVAL;
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
- return 0;
-}
-
-#ifdef CONFIG_NET_POLL_CONTROLLER
-/* Polling 'interrupt' - used by things like netconsole to send skbs
- * without having to re-enable interrupts. It's not called while
- * the interrupt routine is executing.
- */
-static void tile_net_netpoll(struct net_device *dev)
-{
- int instance = mpipe_instance(dev);
- struct tile_net_info *info = this_cpu_ptr(&per_cpu_info);
- struct mpipe_data *md = &mpipe_data[instance];
-
- disable_percpu_irq(md->ingress_irq);
- napi_schedule(&info->mpipe[instance].napi);
- enable_percpu_irq(md->ingress_irq, 0);
-}
-#endif
-
-static const struct net_device_ops tile_net_ops = {
- .ndo_open = tile_net_open,
- .ndo_stop = tile_net_stop,
- .ndo_start_xmit = tile_net_tx,
- .ndo_select_queue = tile_net_select_queue,
- .ndo_do_ioctl = tile_net_ioctl,
- .ndo_tx_timeout = tile_net_tx_timeout,
- .ndo_set_mac_address = tile_net_set_mac_address,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = tile_net_netpoll,
-#endif
-};
-
-/* The setup function.
- *
- * This uses ether_setup() to assign various fields in dev, including
- * setting IFF_BROADCAST and IFF_MULTICAST, then sets some extra fields.
- */
-static void tile_net_setup(struct net_device *dev)
-{
- netdev_features_t features = 0;
-
- ether_setup(dev);
- dev->netdev_ops = &tile_net_ops;
- dev->watchdog_timeo = TILE_NET_TIMEOUT;
-
- /* MTU range: 68 - 1500 or 9000 */
- dev->mtu = ETH_DATA_LEN;
- dev->min_mtu = ETH_MIN_MTU;
- dev->max_mtu = jumbo_num ? TILE_JUMBO_MAX_MTU : ETH_DATA_LEN;
-
- features |= NETIF_F_HW_CSUM;
- features |= NETIF_F_SG;
- features |= NETIF_F_TSO;
- features |= NETIF_F_TSO6;
-
- dev->hw_features |= features;
- dev->vlan_features |= features;
- dev->features |= features;
-}
-
-/* Allocate the device structure, register the device, and obtain the
- * MAC address from the hypervisor.
- */
-static void tile_net_dev_init(const char *name, const uint8_t *mac)
-{
- int ret;
- struct net_device *dev;
- struct tile_net_priv *priv;
-
- /* HACK: Ignore "loop" links. */
- if (strncmp(name, "loop", 4) == 0)
- return;
-
- /* Allocate the device structure. Normally, "name" is a
- * template, instantiated by register_netdev(), but not for us.
- */
- dev = alloc_netdev_mqs(sizeof(*priv), name, NET_NAME_UNKNOWN,
- tile_net_setup, NR_CPUS, 1);
- if (!dev) {
- pr_err("alloc_netdev_mqs(%s) failed\n", name);
- return;
- }
-
- /* Initialize "priv". */
- priv = netdev_priv(dev);
- priv->dev = dev;
- priv->channel = -1;
- priv->loopify_channel = -1;
- priv->echannel = -1;
- init_ptp_dev(priv);
-
- /* Get the MAC address and set it in the device struct; this must
- * be done before the device is opened. If the MAC is all zeroes,
- * we use a random address, since we're probably on the simulator.
- */
- if (!is_zero_ether_addr(mac))
- ether_addr_copy(dev->dev_addr, mac);
- else
- eth_hw_addr_random(dev);
-
- /* Register the network device. */
- ret = register_netdev(dev);
- if (ret) {
- netdev_err(dev, "register_netdev failed %d\n", ret);
- free_netdev(dev);
- return;
- }
-}
-
-/* Per-cpu module initialization. */
-static void tile_net_init_module_percpu(void *unused)
-{
- struct tile_net_info *info = this_cpu_ptr(&per_cpu_info);
- int my_cpu = smp_processor_id();
- int instance;
-
- for (instance = 0; instance < NR_MPIPE_MAX; instance++) {
- info->mpipe[instance].has_iqueue = false;
- info->mpipe[instance].instance = instance;
- }
- info->my_cpu = my_cpu;
-
- /* Initialize the egress timer. */
- hrtimer_init(&info->egress_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- info->egress_timer.function = tile_net_handle_egress_timer;
-}
-
-/* Module initialization. */
-static int __init tile_net_init_module(void)
-{
- int i;
- char name[GXIO_MPIPE_LINK_NAME_LEN];
- uint8_t mac[6];
-
- pr_info("Tilera Network Driver\n");
-
- BUILD_BUG_ON(NR_MPIPE_MAX != 2);
-
- mutex_init(&tile_net_devs_for_channel_mutex);
-
- /* Initialize each CPU. */
- on_each_cpu(tile_net_init_module_percpu, NULL, 1);
-
- /* Find out what devices we have, and initialize them. */
- for (i = 0; gxio_mpipe_link_enumerate_mac(i, name, mac) >= 0; i++)
- tile_net_dev_init(name, mac);
-
- if (!network_cpus_init())
- cpumask_and(&network_cpus_map,
- housekeeping_cpumask(HK_FLAG_MISC), cpu_online_mask);
-
- return 0;
-}
-
-module_init(tile_net_init_module);
diff --git a/drivers/net/ethernet/tile/tilepro.c b/drivers/net/ethernet/tile/tilepro.c
deleted file mode 100644
index 56d06282fbde..000000000000
--- a/drivers/net/ethernet/tile/tilepro.c
+++ /dev/null
@@ -1,2397 +0,0 @@
-/*
- * Copyright 2011 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/moduleparam.h>
-#include <linux/sched.h>
-#include <linux/kernel.h> /* printk() */
-#include <linux/slab.h> /* kmalloc() */
-#include <linux/errno.h> /* error codes */
-#include <linux/types.h> /* size_t */
-#include <linux/interrupt.h>
-#include <linux/in.h>
-#include <linux/netdevice.h> /* struct device, and other headers */
-#include <linux/etherdevice.h> /* eth_type_trans */
-#include <linux/skbuff.h>
-#include <linux/ioctl.h>
-#include <linux/cdev.h>
-#include <linux/hugetlb.h>
-#include <linux/in6.h>
-#include <linux/timer.h>
-#include <linux/io.h>
-#include <linux/u64_stats_sync.h>
-#include <asm/checksum.h>
-#include <asm/homecache.h>
-
-#include <hv/drv_xgbe_intf.h>
-#include <hv/drv_xgbe_impl.h>
-#include <hv/hypervisor.h>
-#include <hv/netio_intf.h>
-
-/* For TSO */
-#include <linux/ip.h>
-#include <linux/tcp.h>
-
-
-/*
- * First, "tile_net_init_module()" initializes all four "devices" which
- * can be used by Linux.
- *
- * Then, "ifconfig DEVICE up" calls "tile_net_open()", which analyzes
- * the network cpus, then uses "tile_net_open_aux()" to initialize
- * LIPP/LEPP, and then uses "tile_net_open_inner()" to register all
- * the tiles, provide buffers to LIPP, allow ingress to start, and
- * turn on hypervisor interrupt handling (and NAPI) on all tiles.
- *
- * If registration fails due to the link being down, then "retry_work"
- * is used to keep calling "tile_net_open_inner()" until it succeeds.
- *
- * If "ifconfig DEVICE down" is called, it uses "tile_net_stop()" to
- * stop egress, drain the LIPP buffers, unregister all the tiles, stop
- * LIPP/LEPP, and wipe the LEPP queue.
- *
- * We start out with the ingress interrupt enabled on each CPU. When
- * this interrupt fires, we disable it, and call "napi_schedule()".
- * This will cause "tile_net_poll()" to be called, which will pull
- * packets from the netio queue, filtering them out, or passing them
- * to "netif_receive_skb()". If our budget is exhausted, we will
- * return, knowing we will be called again later. Otherwise, we
- * reenable the ingress interrupt, and call "napi_complete()".
- *
- * HACK: Since disabling the ingress interrupt is not reliable, we
- * ignore the interrupt if the global "active" flag is false.
- *
- *
- * NOTE: The use of "native_driver" ensures that EPP exists, and that
- * we are using "LIPP" and "LEPP".
- *
- * NOTE: Failing to free completions for an arbitrarily long time
- * (which is defined to be illegal) does in fact cause bizarre
- * problems. The "egress_timer" helps prevent this from happening.
- */
-
-
-/* HACK: Allow use of "jumbo" packets. */
-/* This should be 1500 if "jumbo" is not set in LIPP. */
-/* This should be at most 10226 (10240 - 14) if "jumbo" is set in LIPP. */
-/* ISSUE: This has not been thoroughly tested (except at 1500). */
-#define TILE_NET_MTU ETH_DATA_LEN
-
-/* HACK: Define this to verify incoming packets. */
-/* #define TILE_NET_VERIFY_INGRESS */
-
-/* Use 3000 to enable the Linux Traffic Control (QoS) layer, else 0. */
-#define TILE_NET_TX_QUEUE_LEN 0
-
-/* Define to dump packets (prints out the whole packet on tx and rx). */
-/* #define TILE_NET_DUMP_PACKETS */
-
-/* Define to enable debug spew (all PDEBUG's are enabled). */
-/* #define TILE_NET_DEBUG */
-
-
-/* Define to activate paranoia checks. */
-/* #define TILE_NET_PARANOIA */
-
-/* Default transmit lockup timeout period, in jiffies. */
-#define TILE_NET_TIMEOUT (5 * HZ)
-
-/* Default retry interval for bringing up the NetIO interface, in jiffies. */
-#define TILE_NET_RETRY_INTERVAL (5 * HZ)
-
-/* Number of ports (xgbe0, xgbe1, gbe0, gbe1). */
-#define TILE_NET_DEVS 4
-
-
-
-/* Paranoia. */
-#if NET_IP_ALIGN != LIPP_PACKET_PADDING
-#error "NET_IP_ALIGN must match LIPP_PACKET_PADDING."
-#endif
-
-
-/* Debug print. */
-#ifdef TILE_NET_DEBUG
-#define PDEBUG(fmt, args...) net_printk(fmt, ## args)
-#else
-#define PDEBUG(fmt, args...)
-#endif
-
-
-MODULE_AUTHOR("Tilera");
-MODULE_LICENSE("GPL");
-
-
-/*
- * Queue of incoming packets for a specific cpu and device.
- *
- * Includes a pointer to the "system" data, and the actual "user" data.
- */
-struct tile_netio_queue {
- netio_queue_impl_t *__system_part;
- netio_queue_user_impl_t __user_part;
-
-};
-
-
-/*
- * Statistics counters for a specific cpu and device.
- */
-struct tile_net_stats_t {
- struct u64_stats_sync syncp;
- u64 rx_packets; /* total packets received */
- u64 tx_packets; /* total packets transmitted */
- u64 rx_bytes; /* total bytes received */
- u64 tx_bytes; /* total bytes transmitted */
- u64 rx_errors; /* packets truncated or marked bad by hw */
- u64 rx_dropped; /* packets not for us or intf not up */
-};
-
-
-/*
- * Info for a specific cpu and device.
- *
- * ISSUE: There is a "dev" pointer in "napi" as well.
- */
-struct tile_net_cpu {
- /* The NAPI struct. */
- struct napi_struct napi;
- /* Packet queue. */
- struct tile_netio_queue queue;
- /* Statistics. */
- struct tile_net_stats_t stats;
- /* True iff NAPI is enabled. */
- bool napi_enabled;
- /* True if this tile has successfully registered with the IPP. */
- bool registered;
- /* True if the link was down last time we tried to register. */
- bool link_down;
- /* True if "egress_timer" is scheduled. */
- bool egress_timer_scheduled;
- /* Number of small sk_buffs which must still be provided. */
- unsigned int num_needed_small_buffers;
- /* Number of large sk_buffs which must still be provided. */
- unsigned int num_needed_large_buffers;
- /* A timer for handling egress completions. */
- struct timer_list egress_timer;
-};
-
-
-/*
- * Info for a specific device.
- */
-struct tile_net_priv {
- /* Our network device. */
- struct net_device *dev;
- /* Pages making up the egress queue. */
- struct page *eq_pages;
- /* Address of the actual egress queue. */
- lepp_queue_t *eq;
- /* Protects "eq". */
- spinlock_t eq_lock;
- /* The hypervisor handle for this interface. */
- int hv_devhdl;
- /* The intr bit mask that IDs this device. */
- u32 intr_id;
- /* True iff "tile_net_open_aux()" has succeeded. */
- bool partly_opened;
- /* True iff the device is "active". */
- bool active;
- /* Effective network cpus. */
- struct cpumask network_cpus_map;
- /* Number of network cpus. */
- int network_cpus_count;
- /* Credits per network cpu. */
- int network_cpus_credits;
- /* For NetIO bringup retries. */
- struct delayed_work retry_work;
- /* Quick access to per cpu data. */
- struct tile_net_cpu *cpu[NR_CPUS];
-};
-
-/* Log2 of the number of small pages needed for the egress queue. */
-#define EQ_ORDER get_order(sizeof(lepp_queue_t))
-/* Size of the egress queue's pages. */
-#define EQ_SIZE (1 << (PAGE_SHIFT + EQ_ORDER))
-
-/*
- * The actual devices (xgbe0, xgbe1, gbe0, gbe1).
- */
-static struct net_device *tile_net_devs[TILE_NET_DEVS];
-
-/*
- * The "tile_net_cpu" structures for each device.
- */
-static DEFINE_PER_CPU(struct tile_net_cpu, hv_xgbe0);
-static DEFINE_PER_CPU(struct tile_net_cpu, hv_xgbe1);
-static DEFINE_PER_CPU(struct tile_net_cpu, hv_gbe0);
-static DEFINE_PER_CPU(struct tile_net_cpu, hv_gbe1);
-
-
-/*
- * True if "network_cpus" was specified.
- */
-static bool network_cpus_used;
-
-/*
- * The actual cpus in "network_cpus".
- */
-static struct cpumask network_cpus_map;
-
-
-
-#ifdef TILE_NET_DEBUG
-/*
- * printk with extra stuff.
- *
- * We print the CPU we're running in brackets.
- */
-static void net_printk(char *fmt, ...)
-{
- int i;
- int len;
- va_list args;
- static char buf[256];
-
- len = sprintf(buf, "tile_net[%2.2d]: ", smp_processor_id());
- va_start(args, fmt);
- i = vscnprintf(buf + len, sizeof(buf) - len - 1, fmt, args);
- va_end(args);
- buf[255] = '\0';
- pr_notice("%s", buf);
-}
-#endif
-
-
-#ifdef TILE_NET_DUMP_PACKETS
-/*
- * Dump a packet.
- */
-static void dump_packet(unsigned char *data, unsigned long length, char *s)
-{
- int my_cpu = smp_processor_id();
-
- unsigned long i;
- char buf[128];
-
- static unsigned int count;
-
- pr_info("dump_packet(data %p, length 0x%lx s %s count 0x%x)\n",
- data, length, s, count++);
-
- pr_info("\n");
-
- for (i = 0; i < length; i++) {
- if ((i & 0xf) == 0)
- sprintf(buf, "[%02d] %8.8lx:", my_cpu, i);
- sprintf(buf + strlen(buf), " %2.2x", data[i]);
- if ((i & 0xf) == 0xf || i == length - 1) {
- strcat(buf, "\n");
- pr_info("%s", buf);
- }
- }
-}
-#endif
-
-
-/*
- * Provide support for the __netio_fastio1() swint
- * (see <hv/drv_xgbe_intf.h> for how it is used).
- *
- * The fastio swint2 call may clobber all the caller-saved registers.
- * It rarely clobbers memory, but we allow for the possibility in
- * the signature just to be on the safe side.
- *
- * Also, gcc doesn't seem to allow an input operand to be
- * clobbered, so we fake it with dummy outputs.
- *
- * This function can't be static because of the way it is declared
- * in the netio header.
- */
-inline int __netio_fastio1(u32 fastio_index, u32 arg0)
-{
- long result, clobber_r1, clobber_r10;
- asm volatile("swint2"
- : "=R00" (result),
- "=R01" (clobber_r1), "=R10" (clobber_r10)
- : "R10" (fastio_index), "R01" (arg0)
- : "memory", "r2", "r3", "r4",
- "r5", "r6", "r7", "r8", "r9",
- "r11", "r12", "r13", "r14",
- "r15", "r16", "r17", "r18", "r19",
- "r20", "r21", "r22", "r23", "r24",
- "r25", "r26", "r27", "r28", "r29");
- return result;
-}
-
-
-static void tile_net_return_credit(struct tile_net_cpu *info)
-{
- struct tile_netio_queue *queue = &info->queue;
- netio_queue_user_impl_t *qup = &queue->__user_part;
-
- /* Return four credits after every fourth packet. */
- if (--qup->__receive_credit_remaining == 0) {
- u32 interval = qup->__receive_credit_interval;
- qup->__receive_credit_remaining = interval;
- __netio_fastio_return_credits(qup->__fastio_index, interval);
- }
-}
-
-
-
-/*
- * Provide a linux buffer to LIPP.
- */
-static void tile_net_provide_linux_buffer(struct tile_net_cpu *info,
- void *va, bool small)
-{
- struct tile_netio_queue *queue = &info->queue;
-
- /* Convert "va" and "small" to "linux_buffer_t". */
- unsigned int buffer = ((unsigned int)(__pa(va) >> 7) << 1) + small;
-
- __netio_fastio_free_buffer(queue->__user_part.__fastio_index, buffer);
-}
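-
-/* The encoding above packs a 38-bit CPA into 32 bits: since "va" is
- * aligned mod 128, physical-address bits 6..0 are zero, so only bits
- * 37..7 (31 bits) are kept, with bit 0 of the result reused as the
- * "small" flag. The receive and drain paths decode it with the mirror
- * expression:
- *
- * bool small = ((buffer & 1) != 0);
- * void *va = __va((phys_addr_t)(buffer >> 1) << 7);
- */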
-
-
-/*
- * Provide a linux buffer for LIPP.
- *
- * Note that the ACTUAL allocation for each buffer is a "struct sk_buff",
- * plus a chunk of memory that includes not only the requested bytes, but
- * also NET_SKB_PAD bytes of initial padding, and a "struct skb_shared_info".
- *
- * Note that "struct skb_shared_info" is 88 bytes with 64K pages and
- * 268 bytes with 4K pages (since the frags[] array needs 18 entries).
- *
- * Without jumbo packets, the maximum packet size will be 1536 bytes,
- * and we use 2 bytes (NET_IP_ALIGN) of padding. ISSUE: If we told
- * the hardware to clip at 1518 bytes instead of 1536 bytes, then we
- * could save an entire cache line, but in practice, we don't need it.
- *
- * Since CPAs are 38 bits, and we can only encode the high 31 bits in
- * a "linux_buffer_t", the low 7 bits must be zero, and thus, we must
- * align the actual "va" mod 128.
- *
- * We assume that the underlying "head" will be aligned mod 64. Note
- * that in practice, we have seen "head" NOT aligned mod 128 even when
- * using 2048 byte allocations, which is surprising.
- *
- * If "head" WAS always aligned mod 128, we could change LIPP to
- * assume that the low SIX bits are zero, and the 7th bit is one, that
- * is, align the actual "va" mod 128 plus 64, which would be "free".
- *
- * For now, the actual "head" pointer points at NET_SKB_PAD bytes of
- * padding, plus 28 or 92 bytes of extra padding, plus the sk_buff
- * pointer, plus the NET_IP_ALIGN padding, plus 126 or 1536 bytes for
- * the actual packet, plus 62 bytes of empty padding, plus some
- * padding and the "struct skb_shared_info".
- *
- * With 64K pages, a large buffer thus needs 32+92+4+2+1536+62+88
- * bytes, or 1816 bytes, which fits comfortably into 2048 bytes.
- *
- * With 64K pages, a small buffer thus needs 32+92+4+2+126+88
- * bytes, or 344 bytes, which means we are wasting 64+ bytes, and
- * could presumably increase the size of small buffers.
- *
- * With 4K pages, a large buffer thus needs 32+92+4+2+1536+62+268
- * bytes, or 1996 bytes, which fits comfortably into 2048 bytes.
- *
- * With 4K pages, a small buffer thus needs 32+92+4+2+126+268
- * bytes, or 524 bytes, which is annoyingly wasteful.
- *
- * Maybe we should increase LIPP_SMALL_PACKET_SIZE to 192?
- *
- * ISSUE: Maybe we should increase "NET_SKB_PAD" to 64?
- */
-static bool tile_net_provide_needed_buffer(struct tile_net_cpu *info,
- bool small)
-{
-#if TILE_NET_MTU <= 1536
- /* Without "jumbo", 2 + 1536 should be sufficient. */
- unsigned int large_size = NET_IP_ALIGN + 1536;
-#else
- /* ISSUE: This has not been tested. */
- unsigned int large_size = NET_IP_ALIGN + TILE_NET_MTU + 100;
-#endif
-
- /* Avoid "false sharing" with last cache line. */
- /* ISSUE: This is already done by "netdev_alloc_skb()". */
- unsigned int len =
- (((small ? LIPP_SMALL_PACKET_SIZE : large_size) +
- CHIP_L2_LINE_SIZE() - 1) & -CHIP_L2_LINE_SIZE());
-
- unsigned int padding = 128 - NET_SKB_PAD;
- unsigned int align;
-
- struct sk_buff *skb;
- void *va;
-
- struct sk_buff **skb_ptr;
-
- /* Request 96 extra bytes for alignment purposes. */
- skb = netdev_alloc_skb(info->napi.dev, len + padding);
- if (skb == NULL)
- return false;
-
- /* Skip 32 or 96 bytes to align "data" mod 128. */
- align = -(long)skb->data & (128 - 1);
- BUG_ON(align > padding);
- skb_reserve(skb, align);
-
- /* This address is given to IPP. */
- va = skb->data;
-
- /* Buffers must not span a huge page. */
- BUG_ON(((((long)va & ~HPAGE_MASK) + len) & HPAGE_MASK) != 0);
-
-#ifdef TILE_NET_PARANOIA
-#if CHIP_HAS_CBOX_HOME_MAP()
- if (hash_default) {
- HV_PTE pte = *virt_to_pte(current->mm, (unsigned long)va);
- if (hv_pte_get_mode(pte) != HV_PTE_MODE_CACHE_HASH_L3)
- panic("Non-HFH ingress buffer! VA=%p Mode=%d PTE=%llx",
- va, hv_pte_get_mode(pte), hv_pte_val(pte));
- }
-#endif
-#endif
-
- /* Invalidate the packet buffer. */
- if (!hash_default)
- __inv_buffer(va, len);
-
- /* Skip two bytes to satisfy LIPP assumptions. */
- /* Note that this aligns IP on a 16 byte boundary. */
- /* ISSUE: Do this when the packet arrives? */
- skb_reserve(skb, NET_IP_ALIGN);
-
- /* Save a back-pointer to 'skb'. */
- skb_ptr = va - sizeof(*skb_ptr);
- *skb_ptr = skb;
-
- /* Make sure "skb_ptr" has been flushed. */
- __insn_mf();
-
- /* Provide the new buffer. */
- tile_net_provide_linux_buffer(info, va, small);
-
- return true;
-}
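-
-/* Worked example of the alignment dance above, with a hypothetical
- * pointer and NET_SKB_PAD == 32: if netdev_alloc_skb() returns
- * skb->data ending in 0x1a0, then align = -(0x1a0) & 127 == 96, and
- * skb_reserve(skb, 96) moves "data" to a 128-byte boundary (0x200),
- * making it encodable as a linux_buffer_t. Because "head" is assumed
- * to be aligned mod 64, align is always 32 or 96, and thus never
- * exceeds the 96 bytes of extra padding requested.
- */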
-
-
-/*
- * Provide linux buffers for LIPP.
- */
-static void tile_net_provide_needed_buffers(struct tile_net_cpu *info)
-{
- while (info->num_needed_small_buffers != 0) {
- if (!tile_net_provide_needed_buffer(info, true))
- goto oops;
- info->num_needed_small_buffers--;
- }
-
- while (info->num_needed_large_buffers != 0) {
- if (!tile_net_provide_needed_buffer(info, false))
- goto oops;
- info->num_needed_large_buffers--;
- }
-
- return;
-
-oops:
-
- /* Add a description to the page allocation failure dump. */
- pr_notice("Could not provide a linux buffer to LIPP.\n");
-}
-
-
-/*
- * Grab some LEPP completions, and store them in "comps", of size
- * "comps_size", and return the number of completions which were
- * stored, so the caller can free them.
- */
-static unsigned int tile_net_lepp_grab_comps(lepp_queue_t *eq,
- struct sk_buff *comps[],
- unsigned int comps_size,
- unsigned int min_size)
-{
- unsigned int n = 0;
-
- unsigned int comp_head = eq->comp_head;
- unsigned int comp_busy = eq->comp_busy;
-
- while (comp_head != comp_busy && n < comps_size) {
- comps[n++] = eq->comps[comp_head];
- LEPP_QINC(comp_head);
- }
-
- if (n < min_size)
- return 0;
-
- eq->comp_head = comp_head;
-
- return n;
-}
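-
-/* Example (illustrative indices, assuming no ring wrap): with
- * comp_head == 10, comp_busy == 13, and comps_size >= 3, this copies
- * eq->comps[10..12], stores comp_head = 13, and returns 3; with
- * min_size == 4 it would instead return 0 and leave eq->comp_head
- * untouched, so nothing is consumed.
- */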
-
-
-/*
- * Free some comps, and return true iff there are still some pending.
- */
-static bool tile_net_lepp_free_comps(struct net_device *dev, bool all)
-{
- struct tile_net_priv *priv = netdev_priv(dev);
-
- lepp_queue_t *eq = priv->eq;
-
- struct sk_buff *olds[64];
- unsigned int wanted = 64;
- unsigned int i, n;
- bool pending;
-
- spin_lock(&priv->eq_lock);
-
- if (all)
- eq->comp_busy = eq->comp_tail;
-
- n = tile_net_lepp_grab_comps(eq, olds, wanted, 0);
-
- pending = (eq->comp_head != eq->comp_tail);
-
- spin_unlock(&priv->eq_lock);
-
- for (i = 0; i < n; i++)
- kfree_skb(olds[i]);
-
- return pending;
-}
-
-
-/*
- * Make sure the egress timer is scheduled.
- *
- * Note that we use "schedule if not scheduled" logic instead of the more
- * obvious "reschedule" logic, because "reschedule" is fairly expensive.
- */
-static void tile_net_schedule_egress_timer(struct tile_net_cpu *info)
-{
- if (!info->egress_timer_scheduled) {
- mod_timer(&info->egress_timer, jiffies + 1);
- info->egress_timer_scheduled = true;
- }
-}
-
-
-/*
- * The "function" for "info->egress_timer".
- *
- * This timer will reschedule itself as long as there are any pending
- * completions expected (on behalf of any tile).
- *
- * ISSUE: Realistically, will the timer ever stop scheduling itself?
- *
- * ISSUE: This timer is almost never actually needed, so just use a global
- * timer that can run on any tile.
- *
- * ISSUE: Maybe instead track number of expected completions, and free
- * only that many, resetting to zero if "pending" is ever false.
- */
-static void tile_net_handle_egress_timer(struct timer_list *t)
-{
- struct tile_net_cpu *info = from_timer(info, t, egress_timer);
- struct net_device *dev = info->napi.dev;
-
- /* The timer is no longer scheduled. */
- info->egress_timer_scheduled = false;
-
- /* Free comps, and reschedule timer if more are pending. */
- if (tile_net_lepp_free_comps(dev, false))
- tile_net_schedule_egress_timer(info);
-}
-
-
-static void tile_net_discard_aux(struct tile_net_cpu *info, int index)
-{
- struct tile_netio_queue *queue = &info->queue;
- netio_queue_impl_t *qsp = queue->__system_part;
- netio_queue_user_impl_t *qup = &queue->__user_part;
-
- int index2_aux = index + sizeof(netio_pkt_t);
- int index2 =
- ((index2_aux ==
- qsp->__packet_receive_queue.__last_packet_plus_one) ?
- 0 : index2_aux);
-
- netio_pkt_t *pkt = (netio_pkt_t *)((unsigned long) &qsp[1] + index);
-
- /* Extract the "linux_buffer_t". */
- unsigned int buffer = pkt->__packet.word;
-
- /* Convert "linux_buffer_t" to "va". */
- void *va = __va((phys_addr_t)(buffer >> 1) << 7);
-
- /* Acquire the associated "skb". */
- struct sk_buff **skb_ptr = va - sizeof(*skb_ptr);
- struct sk_buff *skb = *skb_ptr;
-
- kfree_skb(skb);
-
- /* Consume this packet. */
- qup->__packet_receive_read = index2;
-}
-
-
-/*
- * Like "tile_net_poll()", but just discard packets.
- */
-static void tile_net_discard_packets(struct net_device *dev)
-{
- struct tile_net_priv *priv = netdev_priv(dev);
- int my_cpu = smp_processor_id();
- struct tile_net_cpu *info = priv->cpu[my_cpu];
- struct tile_netio_queue *queue = &info->queue;
- netio_queue_impl_t *qsp = queue->__system_part;
- netio_queue_user_impl_t *qup = &queue->__user_part;
-
- while (qup->__packet_receive_read !=
- qsp->__packet_receive_queue.__packet_write) {
- int index = qup->__packet_receive_read;
- tile_net_discard_aux(info, index);
- }
-}
-
-
-/*
- * Handle the next packet. Return true if "processed", false if "filtered".
- */
-static bool tile_net_poll_aux(struct tile_net_cpu *info, int index)
-{
- struct net_device *dev = info->napi.dev;
-
- struct tile_netio_queue *queue = &info->queue;
- netio_queue_impl_t *qsp = queue->__system_part;
- netio_queue_user_impl_t *qup = &queue->__user_part;
- struct tile_net_stats_t *stats = &info->stats;
-
- int filter;
-
- int index2_aux = index + sizeof(netio_pkt_t);
- int index2 =
- ((index2_aux ==
- qsp->__packet_receive_queue.__last_packet_plus_one) ?
- 0 : index2_aux);
-
- netio_pkt_t *pkt = (netio_pkt_t *)((unsigned long) &qsp[1] + index);
-
- netio_pkt_metadata_t *metadata = NETIO_PKT_METADATA(pkt);
- netio_pkt_status_t pkt_status = NETIO_PKT_STATUS_M(metadata, pkt);
-
- /* Extract the packet size. FIXME: Shouldn't the second line */
- /* get subtracted? Mostly moot, since it should be "zero". */
- unsigned long len =
- (NETIO_PKT_CUSTOM_LENGTH(pkt) +
- NET_IP_ALIGN - NETIO_PACKET_PADDING);
-
- /* Extract the "linux_buffer_t". */
- unsigned int buffer = pkt->__packet.word;
-
- /* Extract "small" (vs "large"). */
- bool small = ((buffer & 1) != 0);
-
- /* Convert "linux_buffer_t" to "va". */
- void *va = __va((phys_addr_t)(buffer >> 1) << 7);
-
- /* Extract the packet data pointer. */
- /* Compare to "NETIO_PKT_CUSTOM_DATA(pkt)". */
- unsigned char *buf = va + NET_IP_ALIGN;
-
- /* Invalidate the packet buffer. */
- if (!hash_default)
- __inv_buffer(buf, len);
-
-#ifdef TILE_NET_DUMP_PACKETS
- dump_packet(buf, len, "rx");
-#endif /* TILE_NET_DUMP_PACKETS */
-
-#ifdef TILE_NET_VERIFY_INGRESS
- if (pkt_status == NETIO_PKT_STATUS_OVERSIZE && len >= 64) {
- dump_packet(buf, len, "rx");
- panic("Unexpected OVERSIZE.");
- }
-#endif
-
- filter = 0;
-
- if (pkt_status == NETIO_PKT_STATUS_BAD) {
- /* Handle CRC error and hardware truncation. */
- filter = 2;
- } else if (!(dev->flags & IFF_UP)) {
- /* Filter packets received before we're up. */
- filter = 1;
- } else if (NETIO_PKT_ETHERTYPE_RECOGNIZED_M(metadata, pkt) &&
- pkt_status == NETIO_PKT_STATUS_UNDERSIZE) {
- /* Filter "truncated" packets. */
- filter = 2;
- } else if (!(dev->flags & IFF_PROMISC)) {
- if (!is_multicast_ether_addr(buf)) {
- /* Filter packets not for our address. */
- const u8 *mine = dev->dev_addr;
- filter = !ether_addr_equal(mine, buf);
- }
- }
-
- u64_stats_update_begin(&stats->syncp);
-
- if (filter != 0) {
-
- if (filter == 1)
- stats->rx_dropped++;
- else
- stats->rx_errors++;
-
- tile_net_provide_linux_buffer(info, va, small);
-
- } else {
-
- /* Acquire the associated "skb". */
- struct sk_buff **skb_ptr = va - sizeof(*skb_ptr);
- struct sk_buff *skb = *skb_ptr;
-
- /* Paranoia. */
- if (skb->data != buf)
- panic("Corrupt linux buffer from LIPP! "
- "VA=%p, skb=%p, skb->data=%p\n",
- va, skb, skb->data);
-
- /* Encode the actual packet length. */
- skb_put(skb, len);
-
- /* NOTE: This call also sets "skb->dev = dev". */
- skb->protocol = eth_type_trans(skb, dev);
-
- /* Avoid recomputing "good" TCP/UDP checksums. */
- if (NETIO_PKT_L4_CSUM_CORRECT_M(metadata, pkt))
- skb->ip_summed = CHECKSUM_UNNECESSARY;
-
- netif_receive_skb(skb);
-
- stats->rx_packets++;
- stats->rx_bytes += len;
- }
-
- u64_stats_update_end(&stats->syncp);
-
- /* ISSUE: It would be nice to defer this until the packet has */
- /* actually been processed. */
- tile_net_return_credit(info);
-
- /* Consume this packet. */
- qup->__packet_receive_read = index2;
-
- return !filter;
-}
-
-
-/*
- * Handle some packets for the given device on the current CPU.
- *
- * If "tile_net_stop()" is called on some other tile while this
- * function is running, we will return, hopefully before that
- * other tile asks us to call "napi_disable()".
- *
- * The "rotting packet" race condition occurs if a packet arrives
- * during the extremely narrow window between the queue appearing to
- * be empty, and the ingress interrupt being re-enabled. This happens
- * a LOT under heavy network load.
- */
-static int tile_net_poll(struct napi_struct *napi, int budget)
-{
- struct net_device *dev = napi->dev;
- struct tile_net_priv *priv = netdev_priv(dev);
- int my_cpu = smp_processor_id();
- struct tile_net_cpu *info = priv->cpu[my_cpu];
- struct tile_netio_queue *queue = &info->queue;
- netio_queue_impl_t *qsp = queue->__system_part;
- netio_queue_user_impl_t *qup = &queue->__user_part;
-
- unsigned int work = 0;
-
- if (budget <= 0)
- goto done;
-
- while (priv->active) {
- int index = qup->__packet_receive_read;
- if (index == qsp->__packet_receive_queue.__packet_write)
- break;
-
- if (tile_net_poll_aux(info, index)) {
- if (++work >= budget)
- goto done;
- }
- }
-
- napi_complete_done(&info->napi, work);
-
- if (!priv->active)
- goto done;
-
- /* Re-enable the ingress interrupt. */
- enable_percpu_irq(priv->intr_id, 0);
-
- /* HACK: Avoid the "rotting packet" problem (see above). */
- if (qup->__packet_receive_read !=
- qsp->__packet_receive_queue.__packet_write) {
- /* ISSUE: Sometimes this returns zero, presumably */
- /* because an interrupt was handled for this tile. */
- (void)napi_reschedule(&info->napi);
- }
-
-done:
-
- if (priv->active)
- tile_net_provide_needed_buffers(info);
-
- return work;
-}
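-
-/* Schematic of the "rotting packet" window handled above: the queue
- * looks empty -> napi_complete_done() -> a packet lands while the
- * ingress interrupt is still disabled (so its interrupt is lost) ->
- * enable_percpu_irq() -> the explicit re-check of __packet_write
- * against __packet_receive_read catches it, and napi_reschedule()
- * arranges for the next poll to drain it.
- */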
-
-
-/*
- * Handle an ingress interrupt for the given device on the current cpu.
- *
- * ISSUE: Sometimes this gets called after "disable_percpu_irq()" has
- * been called! This is probably due to "pending hypervisor downcalls".
- *
- * ISSUE: Is there any race condition between the "napi_schedule()" here
- * and the "napi_complete()" call above?
- */
-static irqreturn_t tile_net_handle_ingress_interrupt(int irq, void *dev_ptr)
-{
- struct net_device *dev = (struct net_device *)dev_ptr;
- struct tile_net_priv *priv = netdev_priv(dev);
- int my_cpu = smp_processor_id();
- struct tile_net_cpu *info = priv->cpu[my_cpu];
-
- /* Disable the ingress interrupt. */
- disable_percpu_irq(priv->intr_id);
-
- /* Ignore unwanted interrupts. */
- if (!priv->active)
- return IRQ_HANDLED;
-
- /* ISSUE: Sometimes "info->napi_enabled" is false here. */
-
- napi_schedule(&info->napi);
-
- return IRQ_HANDLED;
-}
-
-
-/*
- * One time initialization per interface.
- */
-static int tile_net_open_aux(struct net_device *dev)
-{
- struct tile_net_priv *priv = netdev_priv(dev);
-
- int ret;
- int dummy;
- unsigned int epp_lotar;
-
- /*
- * Find out where EPP memory should be homed.
- */
- ret = hv_dev_pread(priv->hv_devhdl, 0,
- (HV_VirtAddr)&epp_lotar, sizeof(epp_lotar),
- NETIO_EPP_SHM_OFF);
- if (ret < 0) {
- pr_err("could not read epp_shm_queue lotar.\n");
- return -EIO;
- }
-
- /*
- * Home the page on the EPP.
- */
- {
- int epp_home = hv_lotar_to_cpu(epp_lotar);
- homecache_change_page_home(priv->eq_pages, EQ_ORDER, epp_home);
- }
-
- /*
- * Register the EPP shared memory queue.
- */
- {
- netio_ipp_address_t ea = {
- .va = 0,
- .pa = __pa(priv->eq),
- .pte = hv_pte(0),
- .size = EQ_SIZE,
- };
- ea.pte = hv_pte_set_lotar(ea.pte, epp_lotar);
- ea.pte = hv_pte_set_mode(ea.pte, HV_PTE_MODE_CACHE_TILE_L3);
- ret = hv_dev_pwrite(priv->hv_devhdl, 0,
- (HV_VirtAddr)&ea,
- sizeof(ea),
- NETIO_EPP_SHM_OFF);
- if (ret < 0)
- return -EIO;
- }
-
- /*
- * Start LIPP/LEPP.
- */
- if (hv_dev_pwrite(priv->hv_devhdl, 0, (HV_VirtAddr)&dummy,
- sizeof(dummy), NETIO_IPP_START_SHIM_OFF) < 0) {
- pr_warn("Failed to start LIPP/LEPP\n");
- return -EIO;
- }
-
- return 0;
-}
-
-
-/*
- * Register with hypervisor on the current CPU.
- *
- * Strangely, this function does important things even if it "fails",
- * which is especially common if the link is not up yet. Hopefully
- * these things are all "harmless" if done twice!
- */
-static void tile_net_register(void *dev_ptr)
-{
- struct net_device *dev = (struct net_device *)dev_ptr;
- struct tile_net_priv *priv = netdev_priv(dev);
- int my_cpu = smp_processor_id();
- struct tile_net_cpu *info;
-
- struct tile_netio_queue *queue;
-
- /* Only network cpus can receive packets. */
- int queue_id =
- cpumask_test_cpu(my_cpu, &priv->network_cpus_map) ? 0 : 255;
-
- netio_input_config_t config = {
- .flags = 0,
- .num_receive_packets = priv->network_cpus_credits,
- .queue_id = queue_id
- };
-
- int ret = 0;
- netio_queue_impl_t *queuep;
-
- PDEBUG("tile_net_register(queue_id %d)\n", queue_id);
-
- if (!strcmp(dev->name, "xgbe0"))
- info = this_cpu_ptr(&hv_xgbe0);
- else if (!strcmp(dev->name, "xgbe1"))
- info = this_cpu_ptr(&hv_xgbe1);
- else if (!strcmp(dev->name, "gbe0"))
- info = this_cpu_ptr(&hv_gbe0);
- else if (!strcmp(dev->name, "gbe1"))
- info = this_cpu_ptr(&hv_gbe1);
- else
- BUG();
-
- /* Initialize the egress timer. */
- timer_setup(&info->egress_timer, tile_net_handle_egress_timer,
- TIMER_PINNED);
-
- u64_stats_init(&info->stats.syncp);
-
- priv->cpu[my_cpu] = info;
-
- /*
- * Register ourselves with LIPP. This does a lot of stuff,
- * including invoking the LIPP registration code.
- */
- ret = hv_dev_pwrite(priv->hv_devhdl, 0,
- (HV_VirtAddr)&config,
- sizeof(netio_input_config_t),
- NETIO_IPP_INPUT_REGISTER_OFF);
- PDEBUG("hv_dev_pwrite(NETIO_IPP_INPUT_REGISTER_OFF) returned %d\n",
- ret);
- if (ret < 0) {
- if (ret != NETIO_LINK_DOWN) {
- printk(KERN_DEBUG "hv_dev_pwrite "
- "NETIO_IPP_INPUT_REGISTER_OFF failure %d\n",
- ret);
- }
- info->link_down = (ret == NETIO_LINK_DOWN);
- return;
- }
-
- /*
- * Get the pointer to our queue's system part.
- */
-
- ret = hv_dev_pread(priv->hv_devhdl, 0,
- (HV_VirtAddr)&queuep,
- sizeof(netio_queue_impl_t *),
- NETIO_IPP_INPUT_REGISTER_OFF);
- PDEBUG("hv_dev_pread(NETIO_IPP_INPUT_REGISTER_OFF) returned %d\n",
- ret);
- PDEBUG("queuep %p\n", queuep);
- if (ret <= 0) {
- /* ISSUE: Shouldn't this be a fatal error? */
- pr_err("hv_dev_pread NETIO_IPP_INPUT_REGISTER_OFF failure\n");
- return;
- }
-
- queue = &info->queue;
-
- queue->__system_part = queuep;
-
- memset(&queue->__user_part, 0, sizeof(netio_queue_user_impl_t));
-
- /* This is traditionally "config.num_receive_packets / 2". */
- queue->__user_part.__receive_credit_interval = 4;
- queue->__user_part.__receive_credit_remaining =
- queue->__user_part.__receive_credit_interval;
-
- /*
- * Get a fastio index from the hypervisor.
- * ISSUE: Shouldn't this check the result?
- */
- ret = hv_dev_pread(priv->hv_devhdl, 0,
- (HV_VirtAddr)&queue->__user_part.__fastio_index,
- sizeof(queue->__user_part.__fastio_index),
- NETIO_IPP_GET_FASTIO_OFF);
- PDEBUG("hv_dev_pread(NETIO_IPP_GET_FASTIO_OFF) returned %d\n", ret);
-
- /* Now we are registered. */
- info->registered = true;
-}
-
-
-/*
- * Deregister with hypervisor on the current CPU.
- *
- * This simply discards all our credits, so no more packets will be
- * delivered to this tile. There may still be packets in our queue.
- *
- * Also, disable the ingress interrupt.
- */
-static void tile_net_deregister(void *dev_ptr)
-{
- struct net_device *dev = (struct net_device *)dev_ptr;
- struct tile_net_priv *priv = netdev_priv(dev);
- int my_cpu = smp_processor_id();
- struct tile_net_cpu *info = priv->cpu[my_cpu];
-
- /* Disable the ingress interrupt. */
- disable_percpu_irq(priv->intr_id);
-
- /* Do nothing else if not registered. */
- if (info == NULL || !info->registered)
- return;
-
- {
- struct tile_netio_queue *queue = &info->queue;
- netio_queue_user_impl_t *qup = &queue->__user_part;
-
- /* Discard all our credits. */
- __netio_fastio_return_credits(qup->__fastio_index, -1);
- }
-}
-
-
-/*
- * Unregister with hypervisor on the current CPU.
- *
- * Also, disable the ingress interrupt.
- */
-static void tile_net_unregister(void *dev_ptr)
-{
- struct net_device *dev = (struct net_device *)dev_ptr;
- struct tile_net_priv *priv = netdev_priv(dev);
- int my_cpu = smp_processor_id();
- struct tile_net_cpu *info = priv->cpu[my_cpu];
-
- int ret;
- int dummy = 0;
-
- /* Disable the ingress interrupt. */
- disable_percpu_irq(priv->intr_id);
-
- /* Do nothing else if not registered. */
- if (info == NULL || !info->registered)
- return;
-
- /* Unregister ourselves with LIPP/LEPP. */
- ret = hv_dev_pwrite(priv->hv_devhdl, 0, (HV_VirtAddr)&dummy,
- sizeof(dummy), NETIO_IPP_INPUT_UNREGISTER_OFF);
- if (ret < 0)
- panic("Failed to unregister with LIPP/LEPP!\n");
-
- /* Discard all packets still in our NetIO queue. */
- tile_net_discard_packets(dev);
-
- /* Reset state. */
- info->num_needed_small_buffers = 0;
- info->num_needed_large_buffers = 0;
-
- /* Cancel egress timer. */
- del_timer(&info->egress_timer);
- info->egress_timer_scheduled = false;
-}
-
-
-/*
- * Helper function for "tile_net_stop()".
- *
- * Also used to handle registration failure in "tile_net_open_inner()",
- * when the various extra steps in "tile_net_stop()" are not necessary.
- */
-static void tile_net_stop_aux(struct net_device *dev)
-{
- struct tile_net_priv *priv = netdev_priv(dev);
- int i;
-
- int dummy = 0;
-
- /*
- * Unregister all tiles, so LIPP will stop delivering packets.
- * Also, delete all the "napi" objects (sequentially, to protect
- * "dev->napi_list").
- */
- on_each_cpu(tile_net_unregister, (void *)dev, 1);
- for_each_online_cpu(i) {
- struct tile_net_cpu *info = priv->cpu[i];
- if (info != NULL && info->registered) {
- netif_napi_del(&info->napi);
- info->registered = false;
- }
- }
-
- /* Stop LIPP/LEPP. */
- if (hv_dev_pwrite(priv->hv_devhdl, 0, (HV_VirtAddr)&dummy,
- sizeof(dummy), NETIO_IPP_STOP_SHIM_OFF) < 0)
- panic("Failed to stop LIPP/LEPP!\n");
-
- priv->partly_opened = false;
-}
-
-
-/*
- * Disable NAPI for the given device on the current cpu.
- */
-static void tile_net_stop_disable(void *dev_ptr)
-{
- struct net_device *dev = (struct net_device *)dev_ptr;
- struct tile_net_priv *priv = netdev_priv(dev);
- int my_cpu = smp_processor_id();
- struct tile_net_cpu *info = priv->cpu[my_cpu];
-
- /* Disable NAPI if needed. */
- if (info != NULL && info->napi_enabled) {
- napi_disable(&info->napi);
- info->napi_enabled = false;
- }
-}
-
-
-/*
- * Enable NAPI and the ingress interrupt for the given device
- * on the current cpu.
- *
- * ISSUE: Only do this for "network cpus"?
- */
-static void tile_net_open_enable(void *dev_ptr)
-{
- struct net_device *dev = (struct net_device *)dev_ptr;
- struct tile_net_priv *priv = netdev_priv(dev);
- int my_cpu = smp_processor_id();
- struct tile_net_cpu *info = priv->cpu[my_cpu];
-
- /* Enable NAPI. */
- napi_enable(&info->napi);
- info->napi_enabled = true;
-
- /* Enable the ingress interrupt. */
- enable_percpu_irq(priv->intr_id, 0);
-}
-
-
-/*
- * tile_net_open_inner does most of the work of bringing up the interface.
- * It's called from tile_net_open(), and also from tile_net_retry_open().
- * The return value is 0 if the interface was brought up, < 0 if
- * tile_net_open() should return the return value as an error, and > 0 if
- * tile_net_open() should return success and schedule a work item to
- * periodically retry the bringup.
- */
-static int tile_net_open_inner(struct net_device *dev)
-{
- struct tile_net_priv *priv = netdev_priv(dev);
- int my_cpu = smp_processor_id();
- struct tile_net_cpu *info;
- struct tile_netio_queue *queue;
- int result = 0;
- int i;
- int dummy = 0;
-
- /*
- * First try to register just on the local CPU, and handle any
- * semi-expected "link down" failure specially. Note that we
- * do NOT call "tile_net_stop_aux()", unlike below.
- */
- tile_net_register(dev);
- info = priv->cpu[my_cpu];
- if (!info->registered) {
- if (info->link_down)
- return 1;
- return -EAGAIN;
- }
-
- /*
- * Now register everywhere else. If any registration fails,
- * even for "link down" (which might not be possible), we
- * clean up using "tile_net_stop_aux()". Also, add all the
- * "napi" objects (sequentially, to protect "dev->napi_list").
- * ISSUE: Only use "netif_napi_add()" for "network cpus"?
- */
- smp_call_function(tile_net_register, (void *)dev, 1);
- for_each_online_cpu(i) {
- struct tile_net_cpu *info = priv->cpu[i];
- if (info->registered)
- netif_napi_add(dev, &info->napi, tile_net_poll, 64);
- else
- result = -EAGAIN;
- }
- if (result != 0) {
- tile_net_stop_aux(dev);
- return result;
- }
-
- queue = &info->queue;
-
- if (priv->intr_id == 0) {
- unsigned int irq;
-
- /*
- * Acquire the irq allocated by the hypervisor. Every
- * queue gets the same irq. The "__intr_id" field is
- * "1 << irq", so we use "__ffs()" to extract "irq".
- */
- priv->intr_id = queue->__system_part->__intr_id;
- BUG_ON(priv->intr_id == 0);
- irq = __ffs(priv->intr_id);
-
- /*
- * Register the ingress interrupt handler for this
- * device, permanently.
- *
- * We used to call "free_irq()" in "tile_net_stop()",
- * and then re-register the handler here every time,
- * but that caused DNP errors in "handle_IRQ_event()"
- * because "desc->action" was NULL. See bug 9143.
- */
- tile_irq_activate(irq, TILE_IRQ_PERCPU);
- BUG_ON(request_irq(irq, tile_net_handle_ingress_interrupt,
- 0, dev->name, (void *)dev) != 0);
- }
-
- {
- /* Allocate initial buffers. */
-
- int max_buffers =
- priv->network_cpus_count * priv->network_cpus_credits;
-
- info->num_needed_small_buffers =
- min(LIPP_SMALL_BUFFERS, max_buffers);
-
- info->num_needed_large_buffers =
- min(LIPP_LARGE_BUFFERS, max_buffers);
-
- tile_net_provide_needed_buffers(info);
-
- if (info->num_needed_small_buffers != 0 ||
- info->num_needed_large_buffers != 0)
- panic("Insufficient memory for buffer stack!");
- }
-
- /* We are about to be active. */
- priv->active = true;
-
- /* Make sure "active" is visible to all tiles. */
- mb();
-
- /* On each tile, enable NAPI and the ingress interrupt. */
- on_each_cpu(tile_net_open_enable, (void *)dev, 1);
-
- /* Start LIPP/LEPP and activate "ingress" at the shim. */
- if (hv_dev_pwrite(priv->hv_devhdl, 0, (HV_VirtAddr)&dummy,
- sizeof(dummy), NETIO_IPP_INPUT_INIT_OFF) < 0)
- panic("Failed to activate the LIPP Shim!\n");
-
- /* Start our transmit queue. */
- netif_start_queue(dev);
-
- return 0;
-}
-
-
-/*
- * Called periodically to retry bringing up the NetIO interface,
- * if it doesn't come up cleanly during tile_net_open().
- */
-static void tile_net_open_retry(struct work_struct *w)
-{
- struct delayed_work *dw = to_delayed_work(w);
-
- struct tile_net_priv *priv =
- container_of(dw, struct tile_net_priv, retry_work);
-
- /*
- * Try to bring the NetIO interface up. If it fails, reschedule
- * ourselves to try again later; otherwise, tell Linux we now have
- * a working link. ISSUE: What if the return value is negative?
- */
- if (tile_net_open_inner(priv->dev) != 0)
- schedule_delayed_work(&priv->retry_work,
- TILE_NET_RETRY_INTERVAL);
- else
- netif_carrier_on(priv->dev);
-}
-
-
-/*
- * Called when a network interface is made active.
- *
- * Returns 0 on success, negative value on failure.
- *
- * The open entry point is called when a network interface is made
- * active by the system (IFF_UP). At this point all resources needed
- * for transmit and receive operations are allocated, the interrupt
- * handler is registered with the OS (if needed), the watchdog timer
- * is started, and the stack is notified that the interface is ready.
- *
- * If the actual link is not available yet, then we tell Linux that
- * we have no carrier, and we keep checking until the link comes up.
- */
-static int tile_net_open(struct net_device *dev)
-{
- int ret = 0;
- struct tile_net_priv *priv = netdev_priv(dev);
-
- /*
- * We rely on priv->partly_opened to tell us if this is the
- * first time this interface is being brought up. If it is
- * set, the IPP was already initialized and should not be
- * initialized again.
- */
- if (!priv->partly_opened) {
-
- int count;
- int credits;
-
- /* Initialize LIPP/LEPP, and start the Shim. */
- ret = tile_net_open_aux(dev);
- if (ret < 0) {
- pr_err("tile_net_open_aux failed: %d\n", ret);
- return ret;
- }
-
- /* Analyze the network cpus. */
-
- if (network_cpus_used)
- cpumask_copy(&priv->network_cpus_map,
- &network_cpus_map);
- else
- cpumask_copy(&priv->network_cpus_map, cpu_online_mask);
-
-
- count = cpumask_weight(&priv->network_cpus_map);
-
-		/*
-		 * Split the large buffers evenly among the cpus,
-		 * rounded down to an even count, with a floor of 16.
-		 */
- credits = max(16, (LIPP_LARGE_BUFFERS / count) & ~1);
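-		/*
-		 * For example, with 10 network cpus and 500 large
-		 * buffers, (500 / 10) & ~1 == 50, so each cpu would
-		 * get 50 credits, well above the floor of 16.
-		 */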
-
- /* Apply "GBE" max limit. */
- /* ISSUE: Use higher limit for XGBE? */
- credits = min(NETIO_MAX_RECEIVE_PKTS, credits);
-
- priv->network_cpus_count = count;
- priv->network_cpus_credits = credits;
-
-#ifdef TILE_NET_DEBUG
- pr_info("Using %d network cpus, with %d credits each\n",
- priv->network_cpus_count, priv->network_cpus_credits);
-#endif
-
- priv->partly_opened = true;
-
- } else {
- /* FIXME: Is this possible? */
- /* printk("Already partly opened.\n"); */
- }
-
- /*
- * Attempt to bring up the link.
- */
- ret = tile_net_open_inner(dev);
- if (ret <= 0) {
- if (ret == 0)
- netif_carrier_on(dev);
- return ret;
- }
-
- /*
- * We were unable to bring up the NetIO interface, but we want to
- * try again in a little bit. Tell Linux that we have no carrier
- * so it doesn't try to use the interface before the link comes up
- * and then remember to try again later.
- */
- netif_carrier_off(dev);
- schedule_delayed_work(&priv->retry_work, TILE_NET_RETRY_INTERVAL);
-
- return 0;
-}
-
-
-static int tile_net_drain_lipp_buffers(struct tile_net_priv *priv)
-{
- int n = 0;
-
- /* Drain all the LIPP buffers. */
- while (true) {
- unsigned int buffer;
-
- /* NOTE: This should never fail. */
- if (hv_dev_pread(priv->hv_devhdl, 0, (HV_VirtAddr)&buffer,
- sizeof(buffer), NETIO_IPP_DRAIN_OFF) < 0)
- break;
-
- /* Stop when done. */
- if (buffer == 0)
- break;
-
- {
- /* Convert "linux_buffer_t" to "va". */
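-			/*
-			 * The low bit is dropped; the remaining bits
-			 * hold the physical address in 128 byte units.
-			 */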
- void *va = __va((phys_addr_t)(buffer >> 1) << 7);
-
- /* Acquire the associated "skb". */
- struct sk_buff **skb_ptr = va - sizeof(*skb_ptr);
- struct sk_buff *skb = *skb_ptr;
-
- kfree_skb(skb);
- }
-
- n++;
- }
-
- return n;
-}
-
-
-/*
- * Disables a network interface.
- *
- * Returns 0; this is not allowed to fail.
- *
- * The close entry point is called when an interface is de-activated
- * by the OS. The hardware is still under the driver's control, but
- * needs to be disabled. A global MAC reset is issued to stop the
- * hardware, and all transmit and receive resources are freed.
- *
- * ISSUE: How closely does "netif_running(dev)" mirror "priv->active"?
- *
- * Before we are called by "__dev_close()", "netif_running()" will
- * have been cleared, so no NEW calls to "tile_net_poll()" will be
- * made by "netpoll_poll_dev()".
- *
- * Often, this can cause some tiles to still have packets in their
- * queues, so we must call "tile_net_discard_packets()" later.
- *
- * Note that some other tile may still be INSIDE "tile_net_poll()",
- * and in fact, many will be, if there is heavy network load.
- *
- * Calling "on_each_cpu(tile_net_stop_disable, (void *)dev, 1)" when
- * any tile is still "napi_schedule()"'d will induce a horrible crash
- * when "msleep()" is called. This includes tiles which are inside
- * "tile_net_poll()" which have not yet called "napi_complete()".
- *
- * So, we must first try to wait long enough for other tiles to finish
- * with any current "tile_net_poll()" call, and, hopefully, to clear
- * the "scheduled" flag. ISSUE: It is unclear what happens to tiles
- * which have called "napi_schedule()" but which had not yet tried to
- * call "tile_net_poll()", or which exhausted their budget inside
- * "tile_net_poll()" just before this function was called.
- */
-static int tile_net_stop(struct net_device *dev)
-{
- struct tile_net_priv *priv = netdev_priv(dev);
-
- PDEBUG("tile_net_stop()\n");
-
- /* Start discarding packets. */
- priv->active = false;
-
- /* Make sure "active" is visible to all tiles. */
- mb();
-
- /*
- * On each tile, make sure no NEW packets get delivered, and
- * disable the ingress interrupt.
- *
- * Note that the ingress interrupt can fire AFTER this,
- * presumably due to packets which were recently delivered,
- * but it will have no effect.
- */
- on_each_cpu(tile_net_deregister, (void *)dev, 1);
-
- /* Optimistically drain LIPP buffers. */
- (void)tile_net_drain_lipp_buffers(priv);
-
- /* ISSUE: Only needed if not yet fully open. */
- cancel_delayed_work_sync(&priv->retry_work);
-
- /* Can't transmit any more. */
- netif_stop_queue(dev);
-
- /* Disable NAPI on each tile. */
- on_each_cpu(tile_net_stop_disable, (void *)dev, 1);
-
- /*
- * Drain any remaining LIPP buffers. NOTE: This "printk()"
- * has never been observed, but in theory it could happen.
- */
- if (tile_net_drain_lipp_buffers(priv) != 0)
-		pr_warn("Had to drain some extra LIPP buffers!\n");
-
- /* Stop LIPP/LEPP. */
- tile_net_stop_aux(dev);
-
- /*
- * ISSUE: It appears that, in practice anyway, by the time we
- * get here, there are no pending completions, but just in case,
- * we free (all of) them anyway.
- */
- while (tile_net_lepp_free_comps(dev, true))
- /* loop */;
-
- /* Wipe the EPP queue, and wait till the stores hit the EPP. */
- memset(priv->eq, 0, sizeof(lepp_queue_t));
- mb();
-
- return 0;
-}
-
-
-/*
- * Prepare the "frags" info for the resulting LEPP command.
- *
- * If needed, flush the memory used by the frags.
- */
-static unsigned int tile_net_tx_frags(lepp_frag_t *frags,
- struct sk_buff *skb,
- void *b_data, unsigned int b_len)
-{
- unsigned int i, n = 0;
-
- struct skb_shared_info *sh = skb_shinfo(skb);
-
- phys_addr_t cpa;
-
- if (b_len != 0) {
-
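-		/* Flush the buffer so the shim sees the data in memory. */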
- if (!hash_default)
- finv_buffer_remote(b_data, b_len, 0);
-
- cpa = __pa(b_data);
- frags[n].cpa_lo = cpa;
- frags[n].cpa_hi = cpa >> 32;
- frags[n].length = b_len;
- frags[n].hash_for_home = hash_default;
- n++;
- }
-
- for (i = 0; i < sh->nr_frags; i++) {
-
- skb_frag_t *f = &sh->frags[i];
- unsigned long pfn = page_to_pfn(skb_frag_page(f));
-
- /* FIXME: Compute "hash_for_home" properly. */
- /* ISSUE: The hypervisor checks CHIP_HAS_REV1_DMA_PACKETS(). */
- int hash_for_home = hash_default;
-
- /* FIXME: Hmmm. */
- if (!hash_default) {
- void *va = pfn_to_kaddr(pfn) + f->page_offset;
- BUG_ON(PageHighMem(skb_frag_page(f)));
- finv_buffer_remote(va, skb_frag_size(f), 0);
- }
-
- cpa = ((phys_addr_t)pfn << PAGE_SHIFT) + f->page_offset;
- frags[n].cpa_lo = cpa;
- frags[n].cpa_hi = cpa >> 32;
- frags[n].length = skb_frag_size(f);
- frags[n].hash_for_home = hash_for_home;
- n++;
- }
-
- return n;
-}
-
-
-/*
- * This function takes "skb", consisting of a header template and a
- * payload, and hands it to LEPP, to emit as one or more segments,
- * each consisting of a possibly modified header, plus a piece of the
- * payload, via a process known as "tcp segmentation offload".
- *
- * Usually, "data" will contain the header template, of size "sh_len",
- * and "sh->frags" will contain "skb->data_len" bytes of payload, and
- * there will be "sh->gso_segs" segments.
- *
- * Sometimes, if "sendfile()" requires copying, we will be called with
- * "data" containing the header and payload, with "frags" being empty.
- *
- * Sometimes, for example when using NFS over TCP, a single segment can
- * span 3 fragments, which must be handled carefully in LEPP.
- *
- * See "emulate_large_send_offload()" for some reference code, which
- * does not handle checksumming.
- *
- * ISSUE: How do we make sure that high memory DMA does not migrate?
- */
-static int tile_net_tx_tso(struct sk_buff *skb, struct net_device *dev)
-{
- struct tile_net_priv *priv = netdev_priv(dev);
- int my_cpu = smp_processor_id();
- struct tile_net_cpu *info = priv->cpu[my_cpu];
- struct tile_net_stats_t *stats = &info->stats;
-
- struct skb_shared_info *sh = skb_shinfo(skb);
-
- unsigned char *data = skb->data;
-
- /* The ip header follows the ethernet header. */
- struct iphdr *ih = ip_hdr(skb);
- unsigned int ih_len = ih->ihl * 4;
-
- /* Note that "nh == ih", by definition. */
- unsigned char *nh = skb_network_header(skb);
- unsigned int eh_len = nh - data;
-
- /* The tcp header follows the ip header. */
- struct tcphdr *th = (struct tcphdr *)(nh + ih_len);
- unsigned int th_len = th->doff * 4;
-
- /* The total number of header bytes. */
- /* NOTE: This may be less than skb_headlen(skb). */
- unsigned int sh_len = eh_len + ih_len + th_len;
-
- /* The number of payload bytes at "skb->data + sh_len". */
- /* This is non-zero for sendfile() without HIGHDMA. */
- unsigned int b_len = skb_headlen(skb) - sh_len;
-
- /* The total number of payload bytes. */
- unsigned int d_len = b_len + skb->data_len;
-
- /* The maximum payload size. */
- unsigned int p_len = sh->gso_size;
-
- /* The total number of segments. */
- unsigned int num_segs = sh->gso_segs;
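-
-	/*
-	 * For example, an skb with a 66 byte header template
-	 * (14 eth + 20 ip + 32 tcp) and 4344 payload bytes, at a
-	 * "gso_size" of 1448, yields "num_segs" == 3.
-	 */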
-
- /* The temporary copy of the command. */
- u32 cmd_body[(LEPP_MAX_CMD_SIZE + 3) / 4];
- lepp_tso_cmd_t *cmd = (lepp_tso_cmd_t *)cmd_body;
-
- /* Analyze the "frags". */
- unsigned int num_frags =
- tile_net_tx_frags(cmd->frags, skb, data + sh_len, b_len);
-
- /* The size of the command, including frags and header. */
- size_t cmd_size = LEPP_TSO_CMD_SIZE(num_frags, sh_len);
-
- /* The command header. */
- lepp_tso_cmd_t cmd_init = {
- .tso = true,
- .header_size = sh_len,
- .ip_offset = eh_len,
- .tcp_offset = eh_len + ih_len,
- .payload_size = p_len,
- .num_frags = num_frags,
- };
-
- unsigned long irqflags;
-
- lepp_queue_t *eq = priv->eq;
-
- struct sk_buff *olds[8];
- unsigned int wanted = 8;
- unsigned int i, nolds = 0;
-
- unsigned int cmd_head, cmd_tail, cmd_next;
- unsigned int comp_tail;
-
-
- /* Paranoia. */
- BUG_ON(skb->protocol != htons(ETH_P_IP));
- BUG_ON(ih->protocol != IPPROTO_TCP);
- BUG_ON(skb->ip_summed != CHECKSUM_PARTIAL);
- BUG_ON(num_frags > LEPP_MAX_FRAGS);
-	/* BUG_ON(num_segs != (d_len + (p_len - 1)) / p_len); */
- BUG_ON(num_segs <= 1);
-
-
- /* Finish preparing the command. */
-
- /* Copy the command header. */
- *cmd = cmd_init;
-
- /* Copy the "header". */
- memcpy(&cmd->frags[num_frags], data, sh_len);
-
-
- /* Prefetch and wait, to minimize time spent holding the spinlock. */
- prefetch_L1(&eq->comp_tail);
- prefetch_L1(&eq->cmd_tail);
- mb();
-
-
- /* Enqueue the command. */
-
- spin_lock_irqsave(&priv->eq_lock, irqflags);
-
- /* Handle completions if needed to make room. */
- /* NOTE: Return NETDEV_TX_BUSY if there is still no room. */
- if (lepp_num_free_comp_slots(eq) == 0) {
- nolds = tile_net_lepp_grab_comps(eq, olds, wanted, 0);
- if (nolds == 0) {
-busy:
- spin_unlock_irqrestore(&priv->eq_lock, irqflags);
- return NETDEV_TX_BUSY;
- }
- }
-
- cmd_head = eq->cmd_head;
- cmd_tail = eq->cmd_tail;
-
- /* Prepare to advance, detecting full queue. */
- /* NOTE: Return NETDEV_TX_BUSY if the queue is full. */
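-	/*
-	 * For example, if "cmd_head" is 100 and "cmd_tail" is 80, a
-	 * 32 byte command makes "cmd_next" 112, which would overrun
-	 * the head, so we bail out with NETDEV_TX_BUSY.
-	 */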
- cmd_next = cmd_tail + cmd_size;
- if (cmd_tail < cmd_head && cmd_next >= cmd_head)
- goto busy;
- if (cmd_next > LEPP_CMD_LIMIT) {
- cmd_next = 0;
- if (cmd_next == cmd_head)
- goto busy;
- }
-
- /* Copy the command. */
- memcpy(&eq->cmds[cmd_tail], cmd, cmd_size);
-
- /* Advance. */
- cmd_tail = cmd_next;
-
- /* Record "skb" for eventual freeing. */
- comp_tail = eq->comp_tail;
- eq->comps[comp_tail] = skb;
- LEPP_QINC(comp_tail);
- eq->comp_tail = comp_tail;
-
- /* Flush before allowing LEPP to handle the command. */
- /* ISSUE: Is this the optimal location for the flush? */
- __insn_mf();
-
- eq->cmd_tail = cmd_tail;
-
-	/*
-	 * NOTE: Using "4" here is more efficient than "0" or "2", and,
-	 * strangely, more efficient than pre-checking the number of
-	 * available completions and comparing it to 4.
-	 */
- if (nolds == 0)
- nolds = tile_net_lepp_grab_comps(eq, olds, wanted, 4);
-
- spin_unlock_irqrestore(&priv->eq_lock, irqflags);
-
- /* Handle completions. */
- for (i = 0; i < nolds; i++)
- dev_consume_skb_any(olds[i]);
-
- /* Update stats. */
- u64_stats_update_begin(&stats->syncp);
- stats->tx_packets += num_segs;
- stats->tx_bytes += (num_segs * sh_len) + d_len;
- u64_stats_update_end(&stats->syncp);
-
- /* Make sure the egress timer is scheduled. */
- tile_net_schedule_egress_timer(info);
-
- return NETDEV_TX_OK;
-}
-
-
-/*
- * Transmit a packet (called by the kernel via "hard_start_xmit" hook).
- */
-static int tile_net_tx(struct sk_buff *skb, struct net_device *dev)
-{
- struct tile_net_priv *priv = netdev_priv(dev);
- int my_cpu = smp_processor_id();
- struct tile_net_cpu *info = priv->cpu[my_cpu];
- struct tile_net_stats_t *stats = &info->stats;
-
- unsigned long irqflags;
-
- struct skb_shared_info *sh = skb_shinfo(skb);
-
- unsigned int len = skb->len;
- unsigned char *data = skb->data;
-
- unsigned int csum_start = skb_checksum_start_offset(skb);
-
- lepp_frag_t frags[1 + MAX_SKB_FRAGS];
-
- unsigned int num_frags;
-
- lepp_queue_t *eq = priv->eq;
-
- struct sk_buff *olds[8];
- unsigned int wanted = 8;
- unsigned int i, nolds = 0;
-
- unsigned int cmd_size = sizeof(lepp_cmd_t);
-
- unsigned int cmd_head, cmd_tail, cmd_next;
- unsigned int comp_tail;
-
- lepp_cmd_t cmds[1 + MAX_SKB_FRAGS];
-
-
- /*
- * This is paranoia, since we think that if the link doesn't come
- * up, telling Linux we have no carrier will keep it from trying
- * to transmit. If it does, though, we can't execute this routine,
- * since data structures we depend on aren't set up yet.
- */
- if (!info->registered)
- return NETDEV_TX_BUSY;
-
-
- /* Save the timestamp. */
- netif_trans_update(dev);
-
-
-#ifdef TILE_NET_PARANOIA
-#if CHIP_HAS_CBOX_HOME_MAP()
- if (hash_default) {
- HV_PTE pte = *virt_to_pte(current->mm, (unsigned long)data);
- if (hv_pte_get_mode(pte) != HV_PTE_MODE_CACHE_HASH_L3)
- panic("Non-HFH egress buffer! VA=%p Mode=%d PTE=%llx",
- data, hv_pte_get_mode(pte), hv_pte_val(pte));
- }
-#endif
-#endif
-
-
-#ifdef TILE_NET_DUMP_PACKETS
- /* ISSUE: Does not dump the "frags". */
- dump_packet(data, skb_headlen(skb), "tx");
-#endif /* TILE_NET_DUMP_PACKETS */
-
-
- if (sh->gso_size != 0)
- return tile_net_tx_tso(skb, dev);
-
-
- /* Prepare the commands. */
-
- num_frags = tile_net_tx_frags(frags, skb, data, skb_headlen(skb));
-
- for (i = 0; i < num_frags; i++) {
-
- bool final = (i == num_frags - 1);
-
- lepp_cmd_t cmd = {
- .cpa_lo = frags[i].cpa_lo,
- .cpa_hi = frags[i].cpa_hi,
- .length = frags[i].length,
- .hash_for_home = frags[i].hash_for_home,
- .send_completion = final,
- .end_of_packet = final
- };
-
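-		/*
-		 * For a TCP packet, "csum_start" is the offset of the
-		 * TCP header, and "skb->csum_offset" is 16, the offset
-		 * of the checksum field within the TCP header.
-		 */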
- if (i == 0 && skb->ip_summed == CHECKSUM_PARTIAL) {
- cmd.compute_checksum = 1;
- cmd.checksum_data.bits.start_byte = csum_start;
- cmd.checksum_data.bits.count = len - csum_start;
- cmd.checksum_data.bits.destination_byte =
- csum_start + skb->csum_offset;
- }
-
- cmds[i] = cmd;
- }
-
-
- /* Prefetch and wait, to minimize time spent holding the spinlock. */
- prefetch_L1(&eq->comp_tail);
- prefetch_L1(&eq->cmd_tail);
- mb();
-
-
- /* Enqueue the commands. */
-
- spin_lock_irqsave(&priv->eq_lock, irqflags);
-
- /* Handle completions if needed to make room. */
- /* NOTE: Return NETDEV_TX_BUSY if there is still no room. */
- if (lepp_num_free_comp_slots(eq) == 0) {
- nolds = tile_net_lepp_grab_comps(eq, olds, wanted, 0);
- if (nolds == 0) {
-busy:
- spin_unlock_irqrestore(&priv->eq_lock, irqflags);
- return NETDEV_TX_BUSY;
- }
- }
-
- cmd_head = eq->cmd_head;
- cmd_tail = eq->cmd_tail;
-
- /* Copy the commands, or fail. */
- /* NOTE: Return NETDEV_TX_BUSY if the queue is full. */
- for (i = 0; i < num_frags; i++) {
-
- /* Prepare to advance, detecting full queue. */
- cmd_next = cmd_tail + cmd_size;
- if (cmd_tail < cmd_head && cmd_next >= cmd_head)
- goto busy;
- if (cmd_next > LEPP_CMD_LIMIT) {
- cmd_next = 0;
- if (cmd_next == cmd_head)
- goto busy;
- }
-
- /* Copy the command. */
- *(lepp_cmd_t *)&eq->cmds[cmd_tail] = cmds[i];
-
- /* Advance. */
- cmd_tail = cmd_next;
- }
-
- /* Record "skb" for eventual freeing. */
- comp_tail = eq->comp_tail;
- eq->comps[comp_tail] = skb;
- LEPP_QINC(comp_tail);
- eq->comp_tail = comp_tail;
-
- /* Flush before allowing LEPP to handle the command. */
- /* ISSUE: Is this the optimal location for the flush? */
- __insn_mf();
-
- eq->cmd_tail = cmd_tail;
-
-	/*
-	 * NOTE: Using "4" here is more efficient than "0" or "2", and,
-	 * strangely, more efficient than pre-checking the number of
-	 * available completions and comparing it to 4.
-	 */
- if (nolds == 0)
- nolds = tile_net_lepp_grab_comps(eq, olds, wanted, 4);
-
- spin_unlock_irqrestore(&priv->eq_lock, irqflags);
-
- /* Handle completions. */
- for (i = 0; i < nolds; i++)
- dev_consume_skb_any(olds[i]);
-
- /* HACK: Track "expanded" size for short packets (e.g. 42 < 60). */
- u64_stats_update_begin(&stats->syncp);
- stats->tx_packets++;
- stats->tx_bytes += ((len >= ETH_ZLEN) ? len : ETH_ZLEN);
- u64_stats_update_end(&stats->syncp);
-
- /* Make sure the egress timer is scheduled. */
- tile_net_schedule_egress_timer(info);
-
- return NETDEV_TX_OK;
-}
-
-
-/*
- * Deal with a transmit timeout.
- */
-static void tile_net_tx_timeout(struct net_device *dev)
-{
- PDEBUG("tile_net_tx_timeout()\n");
- PDEBUG("Transmit timeout at %ld, latency %ld\n", jiffies,
- jiffies - dev_trans_start(dev));
-
- /* XXX: ISSUE: This doesn't seem useful for us. */
- netif_wake_queue(dev);
-}
-
-
-/*
- * Ioctl commands.
- */
-static int tile_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-{
- return -EOPNOTSUPP;
-}
-
-
-/*
- * Get System Network Statistics.
- *
- * Returns the address of the device statistics structure.
- */
-static void tile_net_get_stats64(struct net_device *dev,
- struct rtnl_link_stats64 *stats)
-{
- struct tile_net_priv *priv = netdev_priv(dev);
- u64 rx_packets = 0, tx_packets = 0;
- u64 rx_bytes = 0, tx_bytes = 0;
- u64 rx_errors = 0, rx_dropped = 0;
- int i;
-
- for_each_online_cpu(i) {
- struct tile_net_stats_t *cpu_stats;
- u64 trx_packets, ttx_packets, trx_bytes, ttx_bytes;
- u64 trx_errors, trx_dropped;
- unsigned int start;
-
- if (priv->cpu[i] == NULL)
- continue;
- cpu_stats = &priv->cpu[i]->stats;
-
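-		/* Snapshot the counters, retrying if an update races. */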
- do {
- start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
- trx_packets = cpu_stats->rx_packets;
- ttx_packets = cpu_stats->tx_packets;
- trx_bytes = cpu_stats->rx_bytes;
- ttx_bytes = cpu_stats->tx_bytes;
- trx_errors = cpu_stats->rx_errors;
- trx_dropped = cpu_stats->rx_dropped;
- } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
-
- rx_packets += trx_packets;
- tx_packets += ttx_packets;
- rx_bytes += trx_bytes;
- tx_bytes += ttx_bytes;
- rx_errors += trx_errors;
- rx_dropped += trx_dropped;
- }
-
- stats->rx_packets = rx_packets;
- stats->tx_packets = tx_packets;
- stats->rx_bytes = rx_bytes;
- stats->tx_bytes = tx_bytes;
- stats->rx_errors = rx_errors;
- stats->rx_dropped = rx_dropped;
-}
-
-/*
- * Change the Ethernet Address of the NIC.
- *
- * The hypervisor driver does not support changing the MAC address. However,
- * the IPP does not do anything with the MAC address, so the address which
- * gets used on outgoing packets, and which is accepted on incoming packets,
- * is completely up to the NetIO program or kernel driver which is actually
- * handling them.
- *
- * Returns 0 on success, negative on failure.
- */
-static int tile_net_set_mac_address(struct net_device *dev, void *p)
-{
- struct sockaddr *addr = p;
-
- if (!is_valid_ether_addr(addr->sa_data))
- return -EADDRNOTAVAIL;
-
- /* ISSUE: Note that "dev_addr" is now a pointer. */
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
-
- return 0;
-}
-
-
-/*
- * Obtain the MAC address from the hypervisor.
- * This must be done before opening the device.
- */
-static int tile_net_get_mac(struct net_device *dev)
-{
- struct tile_net_priv *priv = netdev_priv(dev);
-
- char hv_dev_name[32];
- int len;
-
- __netio_getset_offset_t offset = { .word = NETIO_IPP_PARAM_OFF };
-
- int ret;
-
- /* For example, "xgbe0". */
- strcpy(hv_dev_name, dev->name);
- len = strlen(hv_dev_name);
-
- /* For example, "xgbe/0". */
- hv_dev_name[len] = hv_dev_name[len - 1];
- hv_dev_name[len - 1] = '/';
- len++;
-
- /* For example, "xgbe/0/native_hash". */
- strcpy(hv_dev_name + len, hash_default ? "/native_hash" : "/native");
-
- /* Get the hypervisor handle for this device. */
- priv->hv_devhdl = hv_dev_open((HV_VirtAddr)hv_dev_name, 0);
- PDEBUG("hv_dev_open(%s) returned %d %p\n",
- hv_dev_name, priv->hv_devhdl, &priv->hv_devhdl);
- if (priv->hv_devhdl < 0) {
- if (priv->hv_devhdl == HV_ENODEV)
- printk(KERN_DEBUG "Ignoring unconfigured device %s\n",
- hv_dev_name);
- else
- printk(KERN_DEBUG "hv_dev_open(%s) returned %d\n",
- hv_dev_name, priv->hv_devhdl);
- return -1;
- }
-
- /*
- * Read the hardware address from the hypervisor.
- * ISSUE: Note that "dev_addr" is now a pointer.
- */
- offset.bits.class = NETIO_PARAM;
- offset.bits.addr = NETIO_PARAM_MAC;
- ret = hv_dev_pread(priv->hv_devhdl, 0,
- (HV_VirtAddr)dev->dev_addr, dev->addr_len,
- offset.word);
- PDEBUG("hv_dev_pread(NETIO_PARAM_MAC) returned %d\n", ret);
- if (ret <= 0) {
- printk(KERN_DEBUG "hv_dev_pread(NETIO_PARAM_MAC) %s failed\n",
- dev->name);
- /*
- * Since the device is configured by the hypervisor but we
- * can't get its MAC address, we are most likely running
- * the simulator, so let's generate a random MAC address.
- */
- eth_hw_addr_random(dev);
- }
-
- return 0;
-}
-
-
-#ifdef CONFIG_NET_POLL_CONTROLLER
-/*
- * Polling 'interrupt' - used by things like netconsole to send skbs
- * without having to re-enable interrupts. It's not called while
- * the interrupt routine is executing.
- */
-static void tile_net_netpoll(struct net_device *dev)
-{
- struct tile_net_priv *priv = netdev_priv(dev);
- disable_percpu_irq(priv->intr_id);
- tile_net_handle_ingress_interrupt(priv->intr_id, dev);
- enable_percpu_irq(priv->intr_id, 0);
-}
-#endif
-
-
-static const struct net_device_ops tile_net_ops = {
- .ndo_open = tile_net_open,
- .ndo_stop = tile_net_stop,
- .ndo_start_xmit = tile_net_tx,
- .ndo_do_ioctl = tile_net_ioctl,
- .ndo_get_stats64 = tile_net_get_stats64,
- .ndo_tx_timeout = tile_net_tx_timeout,
- .ndo_set_mac_address = tile_net_set_mac_address,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = tile_net_netpoll,
-#endif
-};
-
-
-/*
- * The setup function.
- *
- * This uses ether_setup() to assign various fields in dev, including
- * setting IFF_BROADCAST and IFF_MULTICAST, then sets some extra fields.
- */
-static void tile_net_setup(struct net_device *dev)
-{
- netdev_features_t features = 0;
-
- ether_setup(dev);
- dev->netdev_ops = &tile_net_ops;
- dev->watchdog_timeo = TILE_NET_TIMEOUT;
- dev->tx_queue_len = TILE_NET_TX_QUEUE_LEN;
-
- /* MTU range: 68 - 1500 */
- dev->mtu = TILE_NET_MTU;
- dev->min_mtu = ETH_MIN_MTU;
- dev->max_mtu = TILE_NET_MTU;
-
- features |= NETIF_F_HW_CSUM;
- features |= NETIF_F_SG;
-
- /* We support TSO iff the HV supports sufficient frags. */
- if (LEPP_MAX_FRAGS >= 1 + MAX_SKB_FRAGS)
- features |= NETIF_F_TSO;
-
- /* We can't support HIGHDMA without hash_default, since we need
- * to be able to finv() with a VA if we don't have hash_default.
- */
- if (hash_default)
- features |= NETIF_F_HIGHDMA;
-
- dev->hw_features |= features;
- dev->vlan_features |= features;
- dev->features |= features;
-}
-
-
-/*
- * Allocate the device structure, register the device, and obtain the
- * MAC address from the hypervisor.
- */
-static struct net_device *tile_net_dev_init(const char *name)
-{
- int ret;
- struct net_device *dev;
- struct tile_net_priv *priv;
-
- /*
- * Allocate the device structure. This allocates "priv", calls
- * tile_net_setup(), and saves "name". Normally, "name" is a
- * template, instantiated by register_netdev(), but not for us.
- */
- dev = alloc_netdev(sizeof(*priv), name, NET_NAME_UNKNOWN,
- tile_net_setup);
- if (!dev) {
- pr_err("alloc_netdev(%s) failed\n", name);
- return NULL;
- }
-
- priv = netdev_priv(dev);
-
- /* Initialize "priv". */
-
- memset(priv, 0, sizeof(*priv));
-
- /* Save "dev" for "tile_net_open_retry()". */
- priv->dev = dev;
-
- INIT_DELAYED_WORK(&priv->retry_work, tile_net_open_retry);
-
- spin_lock_init(&priv->eq_lock);
-
- /* Allocate "eq". */
- priv->eq_pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, EQ_ORDER);
- if (!priv->eq_pages) {
- free_netdev(dev);
- return NULL;
- }
- priv->eq = page_address(priv->eq_pages);
-
- /* Register the network device. */
- ret = register_netdev(dev);
- if (ret) {
- pr_err("register_netdev %s failed %d\n", dev->name, ret);
- __free_pages(priv->eq_pages, EQ_ORDER);
- free_netdev(dev);
- return NULL;
- }
-
- /* Get the MAC address. */
- ret = tile_net_get_mac(dev);
- if (ret < 0) {
- unregister_netdev(dev);
- __free_pages(priv->eq_pages, EQ_ORDER);
- free_netdev(dev);
- return NULL;
- }
-
- return dev;
-}
-
-
-/*
- * Module cleanup.
- *
- * FIXME: If compiled as a module, this module cannot be "unloaded",
- * because the "ingress interrupt handler" is registered permanently.
- */
-static void tile_net_cleanup(void)
-{
- int i;
-
- for (i = 0; i < TILE_NET_DEVS; i++) {
- if (tile_net_devs[i]) {
- struct net_device *dev = tile_net_devs[i];
- struct tile_net_priv *priv = netdev_priv(dev);
- unregister_netdev(dev);
- finv_buffer_remote(priv->eq, EQ_SIZE, 0);
- __free_pages(priv->eq_pages, EQ_ORDER);
- free_netdev(dev);
- }
- }
-}
-
-
-/*
- * Module initialization.
- */
-static int tile_net_init_module(void)
-{
- pr_info("Tilera Network Driver\n");
-
- tile_net_devs[0] = tile_net_dev_init("xgbe0");
- tile_net_devs[1] = tile_net_dev_init("xgbe1");
- tile_net_devs[2] = tile_net_dev_init("gbe0");
- tile_net_devs[3] = tile_net_dev_init("gbe1");
-
- return 0;
-}
-
-
-module_init(tile_net_init_module);
-module_exit(tile_net_cleanup);
-
-
-#ifndef MODULE
-
-/*
- * The "network_cpus" boot argument specifies the cpus that are dedicated
- * to handling ingress packets.
- *
- * The parameter should be in the form "network_cpus=m-n[,x-y]", where
- * m, n, x, y are integers specifying inclusive ranges of cpus; the
- * cpus named must be neither dedicated cpus nor dataplane cpus.
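- *
- * For example, "network_cpus=2-5,8" dedicates cpus 2, 3, 4, 5 and 8
- * to handling ingress packets.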
- */
-static int __init network_cpus_setup(char *str)
-{
- int rc = cpulist_parse_crop(str, &network_cpus_map);
- if (rc != 0) {
- pr_warn("network_cpus=%s: malformed cpu list\n", str);
- } else {
-
-		/* Restrict to possible cpus. */
- cpumask_and(&network_cpus_map, &network_cpus_map,
- cpu_possible_mask);
-
-
- if (cpumask_empty(&network_cpus_map)) {
- pr_warn("Ignoring network_cpus='%s'\n", str);
- } else {
- pr_info("Linux network CPUs: %*pbl\n",
- cpumask_pr_args(&network_cpus_map));
- network_cpus_used = true;
- }
- }
-
- return 0;
-}
-__setup("network_cpus=", network_cpus_setup);
-
-#endif