Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/3c507.c  2
-rw-r--r--  drivers/net/3c515.c  2
-rw-r--r--  drivers/net/3c59x.c  6
-rw-r--r--  drivers/net/8139cp.c  10
-rw-r--r--  drivers/net/82596.c  2
-rw-r--r--  drivers/net/Kconfig  255
-rw-r--r--  drivers/net/arm/w90p910_ether.c  2
-rw-r--r--  drivers/net/at1700.c  2
-rw-r--r--  drivers/net/atarilance.c  2
-rw-r--r--  drivers/net/atlx/atl1.c  1
-rw-r--r--  drivers/net/ax88796.c  8
-rw-r--r--  drivers/net/benet/be_main.c  6
-rw-r--r--  drivers/net/bnx2x/bnx2x.h  4
-rw-r--r--  drivers/net/bnx2x/bnx2x_cmn.c  2
-rw-r--r--  drivers/net/bnx2x/bnx2x_hsi.h  9
-rw-r--r--  drivers/net/bnx2x/bnx2x_link.c  61
-rw-r--r--  drivers/net/bnx2x/bnx2x_main.c  4
-rw-r--r--  drivers/net/bonding/bond_3ad.c  3
-rw-r--r--  drivers/net/bonding/bond_main.c  16
-rw-r--r--  drivers/net/bonding/bonding.h  2
-rw-r--r--  drivers/net/caif/caif_spi.c  61
-rw-r--r--  drivers/net/caif/caif_spi_slave.c  13
-rw-r--r--  drivers/net/can/mscan/mscan.c  2
-rw-r--r--  drivers/net/can/pch_can.c  410
-rw-r--r--  drivers/net/can/sja1000/sja1000_of_platform.c  8
-rw-r--r--  drivers/net/cnic.c  2
-rw-r--r--  drivers/net/cxgb3/cxgb3_main.c  1
-rw-r--r--  drivers/net/cxgb4/cxgb4_main.c  1
-rw-r--r--  drivers/net/cxgb4vf/adapter.h  2
-rw-r--r--  drivers/net/cxgb4vf/cxgb4vf_main.c  75
-rw-r--r--  drivers/net/cxgb4vf/sge.c  131
-rw-r--r--  drivers/net/cxgb4vf/t4vf_common.h  1
-rw-r--r--  drivers/net/cxgb4vf/t4vf_hw.c  24
-rw-r--r--  drivers/net/dm9000.c  2
-rw-r--r--  drivers/net/e1000e/82571.c  143
-rw-r--r--  drivers/net/e1000e/defines.h  1
-rw-r--r--  drivers/net/e1000e/netdev.c  51
-rw-r--r--  drivers/net/eepro.c  2
-rw-r--r--  drivers/net/enic/enic.h  2
-rw-r--r--  drivers/net/enic/enic_main.c  4
-rw-r--r--  drivers/net/fec_mpc52xx.c  19
-rw-r--r--  drivers/net/gianfar.c  7
-rw-r--r--  drivers/net/gianfar_ethtool.c  5
-rw-r--r--  drivers/net/ibm_newemac/core.c  1
-rw-r--r--  drivers/net/igbvf/Makefile  2
-rw-r--r--  drivers/net/igbvf/defines.h  2
-rw-r--r--  drivers/net/igbvf/ethtool.c  2
-rw-r--r--  drivers/net/igbvf/igbvf.h  3
-rw-r--r--  drivers/net/igbvf/mbx.c  2
-rw-r--r--  drivers/net/igbvf/mbx.h  2
-rw-r--r--  drivers/net/igbvf/netdev.c  11
-rw-r--r--  drivers/net/igbvf/regs.h  2
-rw-r--r--  drivers/net/igbvf/vf.c  2
-rw-r--r--  drivers/net/igbvf/vf.h  2
-rw-r--r--  drivers/net/ipg.c  6
-rw-r--r--  drivers/net/iseries_veth.c  2
-rw-r--r--  drivers/net/ixgbe/Makefile  2
-rw-r--r--  drivers/net/ixgbe/ixgbe.h  122
-rw-r--r--  drivers/net/ixgbe/ixgbe_82598.c  58
-rw-r--r--  drivers/net/ixgbe/ixgbe_82599.c  94
-rw-r--r--  drivers/net/ixgbe/ixgbe_common.c  98
-rw-r--r--  drivers/net/ixgbe/ixgbe_common.h  5
-rw-r--r--  drivers/net/ixgbe/ixgbe_dcb.c  17
-rw-r--r--  drivers/net/ixgbe/ixgbe_dcb.h  3
-rw-r--r--  drivers/net/ixgbe/ixgbe_dcb_82598.c  12
-rw-r--r--  drivers/net/ixgbe/ixgbe_dcb_82599.c  12
-rw-r--r--  drivers/net/ixgbe/ixgbe_dcb_nl.c  55
-rw-r--r--  drivers/net/ixgbe/ixgbe_ethtool.c  255
-rw-r--r--  drivers/net/ixgbe/ixgbe_fcoe.c  15
-rw-r--r--  drivers/net/ixgbe/ixgbe_main.c  2043
-rw-r--r--  drivers/net/ixgbe/ixgbe_mbx.c  40
-rw-r--r--  drivers/net/ixgbe/ixgbe_mbx.h  2
-rw-r--r--  drivers/net/ixgbe/ixgbe_phy.c  52
-rw-r--r--  drivers/net/ixgbe/ixgbe_phy.h  5
-rw-r--r--  drivers/net/ixgbe/ixgbe_sriov.c  5
-rw-r--r--  drivers/net/ixgbe/ixgbe_type.h  20
-rw-r--r--  drivers/net/ixgbe/ixgbe_x540.c  722
-rw-r--r--  drivers/net/ixgbevf/Makefile  2
-rw-r--r--  drivers/net/ixgbevf/defines.h  2
-rw-r--r--  drivers/net/ixgbevf/ixgbevf.h  2
-rw-r--r--  drivers/net/ixgbevf/ixgbevf_main.c  13
-rw-r--r--  drivers/net/ixgbevf/mbx.c  2
-rw-r--r--  drivers/net/ixgbevf/mbx.h  2
-rw-r--r--  drivers/net/ixgbevf/regs.h  2
-rw-r--r--  drivers/net/ixgbevf/vf.c  2
-rw-r--r--  drivers/net/ixgbevf/vf.h  2
-rw-r--r--  drivers/net/jme.c  4
-rw-r--r--  drivers/net/ks8851.c  33
-rw-r--r--  drivers/net/lance.c  2
-rw-r--r--  drivers/net/lib82596.c  2
-rw-r--r--  drivers/net/lib8390.c  24
-rw-r--r--  drivers/net/macvlan.c  113
-rw-r--r--  drivers/net/ne-h8300.c  12
-rw-r--r--  drivers/net/netxen/netxen_nic_main.c  3
-rw-r--r--  drivers/net/pcmcia/axnet_cs.c  48
-rw-r--r--  drivers/net/ppp_generic.c  12
-rw-r--r--  drivers/net/qla3xxx.c  8
-rw-r--r--  drivers/net/qlcnic/qlcnic.h  11
-rw-r--r--  drivers/net/qlcnic/qlcnic_ctx.c  3
-rw-r--r--  drivers/net/qlcnic/qlcnic_ethtool.c  3
-rw-r--r--  drivers/net/qlcnic/qlcnic_main.c  1
-rw-r--r--  drivers/net/qlge/qlge.h  4
-rw-r--r--  drivers/net/qlge/qlge_dbg.c  21
-rw-r--r--  drivers/net/qlge/qlge_ethtool.c  19
-rw-r--r--  drivers/net/qlge/qlge_main.c  8
-rw-r--r--  drivers/net/qlge/qlge_mpi.c  2
-rw-r--r--  drivers/net/r8169.c  12
-rw-r--r--  drivers/net/s2io.c  3
-rw-r--r--  drivers/net/sh_eth.c  244
-rw-r--r--  drivers/net/skge.c  1
-rw-r--r--  drivers/net/smsc911x.h  2
-rw-r--r--  drivers/net/tulip/de2104x.c  1
-rw-r--r--  drivers/net/ucc_geth.c  25
-rw-r--r--  drivers/net/usb/hso.c  1
-rw-r--r--  drivers/net/usb/ipheth.c  2
-rw-r--r--  drivers/net/usb/pegasus.c  4
-rw-r--r--  drivers/net/usb/usbnet.c  11
-rw-r--r--  drivers/net/virtio_net.c  12
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_drv.c  951
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_ethtool.c  171
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_int.h  73
-rw-r--r--  drivers/net/vxge/vxge-config.c  1219
-rw-r--r--  drivers/net/vxge/vxge-config.h  135
-rw-r--r--  drivers/net/vxge/vxge-ethtool.c  110
-rw-r--r--  drivers/net/vxge/vxge-main.c  563
-rw-r--r--  drivers/net/vxge/vxge-main.h  81
-rw-r--r--  drivers/net/vxge/vxge-reg.h  33
-rw-r--r--  drivers/net/vxge/vxge-traffic.h  28
-rw-r--r--  drivers/net/vxge/vxge-version.h  31
-rw-r--r--  drivers/net/wireless/rt2x00/Kconfig  3
-rw-r--r--  drivers/net/xilinx_emaclite.c  20
-rw-r--r--  drivers/net/znet.c  2
132 files changed, 5913 insertions(+), 3229 deletions(-)
diff --git a/drivers/net/3c507.c b/drivers/net/3c507.c
index ea9b7a098c9b..475a66d95b34 100644
--- a/drivers/net/3c507.c
+++ b/drivers/net/3c507.c
@@ -201,7 +201,7 @@ struct net_local {
#define RX_BUF_SIZE (1518+14+18) /* packet+header+RBD */
#define RX_BUF_END (dev->mem_end - dev->mem_start)
-#define TX_TIMEOUT 5
+#define TX_TIMEOUT (HZ/20)
/*
That's it: only 86 bytes to set up the beast, including every extra
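The TX_TIMEOUT changes in this series (here and in 3c515, 82596, at1700, atarilance and w90p910_ether below) replace bare jiffy counts with HZ-scaled expressions, so the watchdog fires after the same wall-clock interval regardless of CONFIG_HZ. A minimal userspace sketch of the arithmetic (the HZ values are illustrative, not a kernel config):

    /* tx_timeout_demo.c - why a bare jiffy count drifts with CONFIG_HZ
     * while an HZ-scaled one keeps its wall-clock duration. */
    #include <stdio.h>

    static void show(int hz)
    {
            printf("HZ=%4d: 5 jiffies = %2d ms, HZ/20 = %2d jiffies = 50 ms\n",
                   hz, 5 * 1000 / hz, hz / 20);
    }

    int main(void)
    {
            show(100);      /* old value happened to be right: 50 ms */
            show(1000);     /* old value shrinks to 5 ms; HZ/20 stays 50 ms */
            return 0;
    }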
diff --git a/drivers/net/3c515.c b/drivers/net/3c515.c
index cdf7226a7c43..d2bb4b254c57 100644
--- a/drivers/net/3c515.c
+++ b/drivers/net/3c515.c
@@ -98,7 +98,7 @@ static int rx_nocopy, rx_copy, queued_packet;
#define WAIT_TX_AVAIL 200
/* Operational parameter that usually are not changed. */
-#define TX_TIMEOUT 40 /* Time in jiffies before concluding Tx hung */
+#define TX_TIMEOUT ((4*HZ)/10) /* Time in jiffies before concluding Tx hung */
/* The size here is somewhat misleading: the Corkscrew also uses the ISA
aliased registers at <base>+0x400.
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index e1da258bbfb7..0a92436f0538 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -699,7 +699,8 @@ DEFINE_WINDOW_IO(32)
#define DEVICE_PCI(dev) NULL
#endif
-#define VORTEX_PCI(vp) (((vp)->gendev) ? DEVICE_PCI((vp)->gendev) : NULL)
+#define VORTEX_PCI(vp) \
+ ((struct pci_dev *) (((vp)->gendev) ? DEVICE_PCI((vp)->gendev) : NULL))
#ifdef CONFIG_EISA
#define DEVICE_EISA(dev) (((dev)->bus == &eisa_bus_type) ? to_eisa_device((dev)) : NULL)
@@ -707,7 +708,8 @@ DEFINE_WINDOW_IO(32)
#define DEVICE_EISA(dev) NULL
#endif
-#define VORTEX_EISA(vp) (((vp)->gendev) ? DEVICE_EISA((vp)->gendev) : NULL)
+#define VORTEX_EISA(vp) \
+ ((struct eisa_device *) (((vp)->gendev) ? DEVICE_EISA((vp)->gendev) : NULL))
/* The action to take with a media selection timer tick.
Note that we deviate from the 3Com order by checking 10base2 before AUI.
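The new VORTEX_PCI/VORTEX_EISA definitions above add an explicit cast so both arms of the conditional yield a properly typed pointer instead of whatever DEVICE_PCI/DEVICE_EISA expand to. A hedged sketch of the same NULL-safe typed-accessor pattern, with stand-in struct names:

    /* typed_accessor_demo.c - NULL-safe accessor macro with an explicit
     * result type; pci_dev_stub/vortex_stub are stand-ins, not kernel types. */
    #include <stdio.h>

    struct pci_dev_stub { int devfn; };
    struct vortex_stub  { void *gendev; };

    #define VORTEX_PCI_STUB(vp) \
            ((struct pci_dev_stub *)((vp)->gendev ? (vp)->gendev : NULL))

    int main(void)
    {
            struct pci_dev_stub pdev   = { .devfn = 3 };
            struct vortex_stub  vp     = { .gendev = &pdev };
            struct vortex_stub  orphan = { .gendev = NULL };

            printf("devfn=%d\n", VORTEX_PCI_STUB(&vp)->devfn);        /* 3 */
            printf("orphan=%p\n", (void *)VORTEX_PCI_STUB(&orphan));  /* nil */
            return 0;
    }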
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index ac422cd332ea..dd16e83933a2 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -490,13 +490,11 @@ static inline unsigned int cp_rx_csum_ok (u32 status)
{
unsigned int protocol = (status >> 16) & 0x3;
- if (likely((protocol == RxProtoTCP) && (!(status & TCPFail))))
+ if (((protocol == RxProtoTCP) && !(status & TCPFail)) ||
+ ((protocol == RxProtoUDP) && !(status & UDPFail)))
return 1;
- else if ((protocol == RxProtoUDP) && (!(status & UDPFail)))
- return 1;
- else if ((protocol == RxProtoIP) && (!(status & IPFail)))
- return 1;
- return 0;
+ else
+ return 0;
}
static int cp_rx_poll(struct napi_struct *napi, int budget)
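The rewritten cp_rx_csum_ok() folds the TCP and UDP checks into one expression and drops the RxProtoIP arm, so a frame whose hardware only validated the IP header no longer claims a good L4 checksum. The shape of the new predicate, with illustrative field values (not the 8139cp's real bit layout):

    /* rx_csum_demo.c - simplified checksum-ok predicate; the enum and
     * fail bits are illustrative stand-ins for the chip's status word. */
    #include <stdio.h>

    enum { PROTO_NONE, PROTO_TCP, PROTO_UDP, PROTO_IP };
    #define TCP_FAIL (1u << 0)
    #define UDP_FAIL (1u << 1)

    static unsigned int rx_csum_ok(unsigned int proto, unsigned int status)
    {
            return (proto == PROTO_TCP && !(status & TCP_FAIL)) ||
                   (proto == PROTO_UDP && !(status & UDP_FAIL));
    }

    int main(void)
    {
            printf("tcp good: %u\n", rx_csum_ok(PROTO_TCP, 0));        /* 1 */
            printf("udp bad : %u\n", rx_csum_ok(PROTO_UDP, UDP_FAIL)); /* 0 */
            printf("ip only : %u\n", rx_csum_ok(PROTO_IP, 0));         /* 0 */
            return 0;
    }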
diff --git a/drivers/net/82596.c b/drivers/net/82596.c
index e2c9c5b949f9..be1f1970c842 100644
--- a/drivers/net/82596.c
+++ b/drivers/net/82596.c
@@ -191,7 +191,7 @@ enum commands {
#define RX_SUSPEND 0x0030
#define RX_ABORT 0x0040
-#define TX_TIMEOUT 5
+#define TX_TIMEOUT (HZ/20)
struct i596_reg {
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index f6668cdaac85..a11dc735752c 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1533,7 +1533,7 @@ config E100
<http://support.intel.com/support/network/adapter/pro100/21397.htm>
- to identify the adapter.
+ to identify the adapter.
For the latest Intel PRO/100 network driver for Linux, see:
@@ -1786,17 +1786,17 @@ config KS8842
tristate "Micrel KSZ8841/42 with generic bus interface"
depends on HAS_IOMEM && DMA_ENGINE
help
- This platform driver is for KSZ8841(1-port) / KS8842(2-port)
- ethernet switch chip (managed, VLAN, QoS) from Micrel or
- Timberdale(FPGA).
+ This platform driver is for KSZ8841(1-port) / KS8842(2-port)
+ ethernet switch chip (managed, VLAN, QoS) from Micrel or
+ Timberdale(FPGA).
config KS8851
- tristate "Micrel KS8851 SPI"
- depends on SPI
- select MII
+ tristate "Micrel KS8851 SPI"
+ depends on SPI
+ select MII
select CRC32
- help
- SPI driver for Micrel KS8851 SPI attached network chip.
+ help
+ SPI driver for Micrel KS8851 SPI attached network chip.
config KS8851_MLL
tristate "Micrel KS8851 MLL"
@@ -2133,25 +2133,25 @@ config IP1000
will be called ipg. This is recommended.
config IGB
- tristate "Intel(R) 82575/82576 PCI-Express Gigabit Ethernet support"
- depends on PCI
- ---help---
- This driver supports Intel(R) 82575/82576 gigabit ethernet family of
- adapters. For more information on how to identify your adapter, go
- to the Adapter & Driver ID Guide at:
+ tristate "Intel(R) 82575/82576 PCI-Express Gigabit Ethernet support"
+ depends on PCI
+ ---help---
+ This driver supports Intel(R) 82575/82576 gigabit ethernet family of
+ adapters. For more information on how to identify your adapter, go
+ to the Adapter & Driver ID Guide at:
- <http://support.intel.com/support/network/adapter/pro100/21397.htm>
+ <http://support.intel.com/support/network/adapter/pro100/21397.htm>
- For general information and support, go to the Intel support
- website at:
+ For general information and support, go to the Intel support
+ website at:
- <http://support.intel.com>
+ <http://support.intel.com>
- More specific information on configuring the driver is in
- <file:Documentation/networking/e1000.txt>.
+ More specific information on configuring the driver is in
+ <file:Documentation/networking/e1000.txt>.
- To compile this driver as a module, choose M here. The module
- will be called igb.
+ To compile this driver as a module, choose M here. The module
+ will be called igb.
config IGB_DCA
bool "Direct Cache Access (DCA) Support"
@@ -2163,25 +2163,25 @@ config IGB_DCA
is used, with the intent of lessening the impact of cache misses.
config IGBVF
- tristate "Intel(R) 82576 Virtual Function Ethernet support"
- depends on PCI
- ---help---
- This driver supports Intel(R) 82576 virtual functions. For more
- information on how to identify your adapter, go to the Adapter &
- Driver ID Guide at:
+ tristate "Intel(R) 82576 Virtual Function Ethernet support"
+ depends on PCI
+ ---help---
+ This driver supports Intel(R) 82576 virtual functions. For more
+ information on how to identify your adapter, go to the Adapter &
+ Driver ID Guide at:
- <http://support.intel.com/support/network/adapter/pro100/21397.htm>
+ <http://support.intel.com/support/network/adapter/pro100/21397.htm>
- For general information and support, go to the Intel support
- website at:
+ For general information and support, go to the Intel support
+ website at:
- <http://support.intel.com>
+ <http://support.intel.com>
- More specific information on configuring the driver is in
- <file:Documentation/networking/e1000.txt>.
+ More specific information on configuring the driver is in
+ <file:Documentation/networking/e1000.txt>.
- To compile this driver as a module, choose M here. The module
- will be called igbvf.
+ To compile this driver as a module, choose M here. The module
+ will be called igbvf.
source "drivers/net/ixp2000/Kconfig"
@@ -2300,14 +2300,14 @@ config SKGE
will be called skge. This is recommended.
config SKGE_DEBUG
- bool "Debugging interface"
- depends on SKGE && DEBUG_FS
- help
- This option adds the ability to dump driver state for debugging.
- The file /sys/kernel/debug/skge/ethX displays the state of the internal
- transmit and receive rings.
+ bool "Debugging interface"
+ depends on SKGE && DEBUG_FS
+ help
+ This option adds the ability to dump driver state for debugging.
+ The file /sys/kernel/debug/skge/ethX displays the state of the internal
+ transmit and receive rings.
- If unsure, say N.
+ If unsure, say N.
config SKY2
tristate "SysKonnect Yukon2 support"
@@ -2326,14 +2326,14 @@ config SKY2
will be called sky2. This is recommended.
config SKY2_DEBUG
- bool "Debugging interface"
- depends on SKY2 && DEBUG_FS
- help
- This option adds the ability to dump driver state for debugging.
- The file /sys/kernel/debug/sky2/ethX displays the state of the internal
- transmit and receive rings.
+ bool "Debugging interface"
+ depends on SKY2 && DEBUG_FS
+ help
+ This option adds the ability to dump driver state for debugging.
+ The file /sys/kernel/debug/sky2/ethX displays the state of the internal
+ transmit and receive rings.
- If unsure, say N.
+ If unsure, say N.
config VIA_VELOCITY
tristate "VIA Velocity support"
@@ -2389,12 +2389,12 @@ config SPIDER_NET
Cell Processor-Based Blades from IBM.
config TSI108_ETH
- tristate "Tundra TSI108 gigabit Ethernet support"
- depends on TSI108_BRIDGE
- help
- This driver supports Tundra TSI108 gigabit Ethernet ports.
- To compile this driver as a module, choose M here: the module
- will be called tsi108_eth.
+ tristate "Tundra TSI108 gigabit Ethernet support"
+ depends on TSI108_BRIDGE
+ help
+ This driver supports Tundra TSI108 gigabit Ethernet ports.
+ To compile this driver as a module, choose M here: the module
+ will be called tsi108_eth.
config GELIC_NET
tristate "PS3 Gigabit Ethernet driver"
@@ -2573,32 +2573,32 @@ config MDIO
tristate
config CHELSIO_T1
- tristate "Chelsio 10Gb Ethernet support"
- depends on PCI
+ tristate "Chelsio 10Gb Ethernet support"
+ depends on PCI
select CRC32
select MDIO
- help
- This driver supports Chelsio gigabit and 10-gigabit
- Ethernet cards. More information about adapter features and
+ help
+ This driver supports Chelsio gigabit and 10-gigabit
+ Ethernet cards. More information about adapter features and
performance tuning is in <file:Documentation/networking/cxgb.txt>.
- For general information about Chelsio and our products, visit
- our website at <http://www.chelsio.com>.
+ For general information about Chelsio and our products, visit
+ our website at <http://www.chelsio.com>.
- For customer support, please visit our customer support page at
- <http://www.chelsio.com/support.html>.
+ For customer support, please visit our customer support page at
+ <http://www.chelsio.com/support.html>.
- Please send feedback to <linux-bugs@chelsio.com>.
+ Please send feedback to <linux-bugs@chelsio.com>.
- To compile this driver as a module, choose M here: the module
- will be called cxgb.
+ To compile this driver as a module, choose M here: the module
+ will be called cxgb.
config CHELSIO_T1_1G
- bool "Chelsio gigabit Ethernet support"
- depends on CHELSIO_T1
- help
- Enables support for Chelsio's gigabit Ethernet PCI cards. If you
- are using only 10G cards say 'N' here.
+ bool "Chelsio gigabit Ethernet support"
+ depends on CHELSIO_T1
+ help
+ Enables support for Chelsio's gigabit Ethernet PCI cards. If you
+ are using only 10G cards say 'N' here.
config CHELSIO_T3_DEPENDS
tristate
@@ -2728,26 +2728,26 @@ config IXGBE_DCB
If unsure, say N.
config IXGBEVF
- tristate "Intel(R) 82599 Virtual Function Ethernet support"
- depends on PCI_MSI
- ---help---
- This driver supports Intel(R) 82599 virtual functions. For more
- information on how to identify your adapter, go to the Adapter &
- Driver ID Guide at:
+ tristate "Intel(R) 82599 Virtual Function Ethernet support"
+ depends on PCI_MSI
+ ---help---
+ This driver supports Intel(R) 82599 virtual functions. For more
+ information on how to identify your adapter, go to the Adapter &
+ Driver ID Guide at:
- <http://support.intel.com/support/network/sb/CS-008441.htm>
+ <http://support.intel.com/support/network/sb/CS-008441.htm>
- For general information and support, go to the Intel support
- website at:
+ For general information and support, go to the Intel support
+ website at:
- <http://support.intel.com>
+ <http://support.intel.com>
- More specific information on configuring the driver is in
- <file:Documentation/networking/ixgbevf.txt>.
+ More specific information on configuring the driver is in
+ <file:Documentation/networking/ixgbevf.txt>.
- To compile this driver as a module, choose M here. The module
- will be called ixgbevf. MSI-X interrupt support is required
- for this driver to work correctly.
+ To compile this driver as a module, choose M here. The module
+ will be called ixgbevf. MSI-X interrupt support is required
+ for this driver to work correctly.
config IXGB
tristate "Intel(R) PRO/10GbE support"
@@ -2772,29 +2772,38 @@ config IXGB
will be called ixgb.
config S2IO
- tristate "S2IO 10Gbe XFrame NIC"
+ tristate "Exar Xframe 10Gb Ethernet Adapter"
depends on PCI
---help---
- This driver supports the 10Gbe XFrame NIC of S2IO.
+ This driver supports Exar Corp's Xframe Series 10Gb Ethernet Adapters.
+
More specific information on configuring the driver is in
<file:Documentation/networking/s2io.txt>.
+ To compile this driver as a module, choose M here. The module
+ will be called s2io.
+
config VXGE
- tristate "Neterion X3100 Series 10GbE PCIe Server Adapter"
+ tristate "Exar X3100 Series 10GbE PCIe Server Adapter"
depends on PCI && INET
---help---
- This driver supports Neterion Inc's X3100 Series 10 GbE PCIe
+ This driver supports Exar Corp's X3100 Series 10 GbE PCIe
I/O Virtualized Server Adapter.
+
More specific information on configuring the driver is in
<file:Documentation/networking/vxge.txt>.
+ To compile this driver as a module, choose M here. The module
+ will be called vxge.
+
config VXGE_DEBUG_TRACE_ALL
bool "Enabling All Debug trace statments in driver"
default n
depends on VXGE
---help---
Say Y here if you want to enabling all the debug trace statements in
- driver. By default only few debug trace statements are enabled.
+ the vxge driver. By default only few debug trace statements are
+ enabled.
config MYRI10GE
tristate "Myricom Myri-10G Ethernet support"
@@ -2906,18 +2915,18 @@ config QLGE
will be called qlge.
config BNA
- tristate "Brocade 1010/1020 10Gb Ethernet Driver support"
- depends on PCI
- ---help---
- This driver supports Brocade 1010/1020 10Gb CEE capable Ethernet
- cards.
- To compile this driver as a module, choose M here: the module
- will be called bna.
+ tristate "Brocade 1010/1020 10Gb Ethernet Driver support"
+ depends on PCI
+ ---help---
+ This driver supports Brocade 1010/1020 10Gb CEE capable Ethernet
+ cards.
+ To compile this driver as a module, choose M here: the module
+ will be called bna.
- For general information and support, go to the Brocade support
- website at:
+ For general information and support, go to the Brocade support
+ website at:
- <http://support.brocade.com>
+ <http://support.brocade.com>
source "drivers/net/sfc/Kconfig"
@@ -3227,18 +3236,18 @@ config PPP_BSDCOMP
modules once you have said "make modules". If unsure, say N.
config PPP_MPPE
- tristate "PPP MPPE compression (encryption) (EXPERIMENTAL)"
- depends on PPP && EXPERIMENTAL
- select CRYPTO
- select CRYPTO_SHA1
- select CRYPTO_ARC4
- select CRYPTO_ECB
- ---help---
- Support for the MPPE Encryption protocol, as employed by the
- Microsoft Point-to-Point Tunneling Protocol.
-
- See http://pptpclient.sourceforge.net/ for information on
- configuring PPTP clients and servers to utilize this method.
+ tristate "PPP MPPE compression (encryption) (EXPERIMENTAL)"
+ depends on PPP && EXPERIMENTAL
+ select CRYPTO
+ select CRYPTO_SHA1
+ select CRYPTO_ARC4
+ select CRYPTO_ECB
+ ---help---
+ Support for the MPPE Encryption protocol, as employed by the
+ Microsoft Point-to-Point Tunneling Protocol.
+
+ See http://pptpclient.sourceforge.net/ for information on
+ configuring PPTP clients and servers to utilize this method.
config PPPOE
tristate "PPP over Ethernet (EXPERIMENTAL)"
@@ -3397,14 +3406,14 @@ config VIRTIO_NET
depends on EXPERIMENTAL && VIRTIO
---help---
This is the virtual network driver for virtio. It can be used with
- lguest or QEMU based VMMs (like KVM or Xen). Say Y or M.
+ lguest or QEMU based VMMs (like KVM or Xen). Say Y or M.
config VMXNET3
- tristate "VMware VMXNET3 ethernet driver"
- depends on PCI && INET
- help
- This driver supports VMware's vmxnet3 virtual ethernet NIC.
- To compile this driver as a module, choose M here: the
- module will be called vmxnet3.
+ tristate "VMware VMXNET3 ethernet driver"
+ depends on PCI && INET
+ help
+ This driver supports VMware's vmxnet3 virtual ethernet NIC.
+ To compile this driver as a module, choose M here: the
+ module will be called vmxnet3.
endif # NETDEVICES
diff --git a/drivers/net/arm/w90p910_ether.c b/drivers/net/arm/w90p910_ether.c
index 4545d5a06c24..bfea499a3513 100644
--- a/drivers/net/arm/w90p910_ether.c
+++ b/drivers/net/arm/w90p910_ether.c
@@ -117,7 +117,7 @@
#define TX_DESC_SIZE 10
#define MAX_RBUFF_SZ 0x600
#define MAX_TBUFF_SZ 0x600
-#define TX_TIMEOUT 50
+#define TX_TIMEOUT (HZ/2)
#define DELAY 1000
#define CAM0 0x0
diff --git a/drivers/net/at1700.c b/drivers/net/at1700.c
index 89876897a6fe..871b1633f543 100644
--- a/drivers/net/at1700.c
+++ b/drivers/net/at1700.c
@@ -150,7 +150,7 @@ struct net_local {
#define PORT_OFFSET(o) (o)
-#define TX_TIMEOUT 10
+#define TX_TIMEOUT (HZ/10)
/* Index to functions, as function prototypes. */
diff --git a/drivers/net/atarilance.c b/drivers/net/atarilance.c
index 8cb27cb7bca1..ce0091eb06f5 100644
--- a/drivers/net/atarilance.c
+++ b/drivers/net/atarilance.c
@@ -116,7 +116,7 @@ MODULE_LICENSE("GPL");
#define RX_RING_LEN_BITS (RX_LOG_RING_SIZE << 5)
#define RX_RING_MOD_MASK (RX_RING_SIZE - 1)
-#define TX_TIMEOUT 20
+#define TX_TIMEOUT (HZ/5)
/* The LANCE Rx and Tx ring descriptors. */
struct lance_rx_head {
diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c
index 43579b3b24ac..53363108994e 100644
--- a/drivers/net/atlx/atl1.c
+++ b/drivers/net/atlx/atl1.c
@@ -3043,7 +3043,6 @@ static int __devinit atl1_probe(struct pci_dev *pdev,
atl1_pcie_patch(adapter);
/* assume we have no link for now */
netif_carrier_off(netdev);
- netif_stop_queue(netdev);
setup_timer(&adapter->phy_config_timer, atl1_phy_config,
(unsigned long)adapter);
diff --git a/drivers/net/ax88796.c b/drivers/net/ax88796.c
index b6da4cf3694b..4bebff3faeab 100644
--- a/drivers/net/ax88796.c
+++ b/drivers/net/ax88796.c
@@ -325,7 +325,7 @@ static void ax_block_output(struct net_device *dev, int count,
static void
ax_mii_ei_outbits(struct net_device *dev, unsigned int bits, int len)
{
- struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+ struct ei_device *ei_local = netdev_priv(dev);
void __iomem *memr_addr = (void __iomem *)dev->base_addr + AX_MEMR;
unsigned int memr;
@@ -364,7 +364,7 @@ ax_mii_ei_outbits(struct net_device *dev, unsigned int bits, int len)
static unsigned int
ax_phy_ei_inbits(struct net_device *dev, int no)
{
- struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+ struct ei_device *ei_local = netdev_priv(dev);
void __iomem *memr_addr = (void __iomem *)dev->base_addr + AX_MEMR;
unsigned int memr;
unsigned int result = 0;
@@ -412,7 +412,7 @@ ax_phy_issueaddr(struct net_device *dev, int phy_addr, int reg, int opc)
static int
ax_phy_read(struct net_device *dev, int phy_addr, int reg)
{
- struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+ struct ei_device *ei_local = netdev_priv(dev);
unsigned long flags;
unsigned int result;
@@ -435,7 +435,7 @@ ax_phy_read(struct net_device *dev, int phy_addr, int reg)
static void
ax_phy_write(struct net_device *dev, int phy_addr, int reg, int value)
{
- struct ei_device *ei = (struct ei_device *) netdev_priv(dev);
+ struct ei_device *ei = netdev_priv(dev);
struct ax_device *ax = to_ax_dev(dev);
unsigned long flags;
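All four ax88796 hunks drop the same redundant cast: netdev_priv() returns void *, which C implicitly converts to any object pointer type, so the (struct ei_device *) adds nothing but noise. A minimal demonstration with a stub in place of the kernel helper:

    /* netdev_priv_demo.c - void * assigns without a cast; priv_area()
     * is a stand-in for the kernel's netdev_priv(). */
    #include <stdio.h>

    struct ei_device_stub { int msg_enable; };

    static void *priv_area(void)
    {
            static struct ei_device_stub ei = { .msg_enable = 7 };
            return &ei;
    }

    int main(void)
    {
            struct ei_device_stub *ei = priv_area();    /* no cast needed */
            printf("msg_enable=%d\n", ei->msg_enable);  /* 7 */
            return 0;
    }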
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index c36cd2ffbadc..93354eee2cfd 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -2458,6 +2458,12 @@ int be_load_fw(struct be_adapter *adapter, u8 *func)
int status, i = 0, num_imgs = 0;
const u8 *p;
+ if (!netif_running(adapter->netdev)) {
+ dev_err(&adapter->pdev->dev,
+ "Firmware load not allowed (interface is down)\n");
+ return -EPERM;
+ }
+
strcpy(fw_file, func);
status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
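The be_load_fw change is an early-out guard: reject a firmware flash up front while the interface is down, with a clear errno, rather than letting it fail deeper in the flash path. The pattern, with the kernel calls mocked out:

    /* fw_guard_demo.c - early-return guard; netif_running is mocked. */
    #include <stdio.h>
    #include <errno.h>

    static int netif_running_stub(int up) { return up; }

    static int load_fw(int if_up)
    {
            if (!netif_running_stub(if_up)) {
                    fprintf(stderr, "firmware load not allowed (interface is down)\n");
                    return -EPERM;   /* caller gets a definite errno */
            }
            /* request_firmware() and the actual flashing would follow */
            return 0;
    }

    int main(void)
    {
            printf("down: %d\n", load_fw(0));   /* -EPERM */
            printf("up  : %d\n", load_fw(1));   /* 0 */
            return 0;
    }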
diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h
index 9eea225decaf..863e73a85fbe 100644
--- a/drivers/net/bnx2x/bnx2x.h
+++ b/drivers/net/bnx2x/bnx2x.h
@@ -20,8 +20,8 @@
* (you will need to reboot afterwards) */
/* #define BNX2X_STOP_ON_ERROR */
-#define DRV_MODULE_VERSION "1.60.00-3"
-#define DRV_MODULE_RELDATE "2010/10/19"
+#define DRV_MODULE_VERSION "1.60.00-4"
+#define DRV_MODULE_RELDATE "2010/11/01"
#define BNX2X_BC_VER 0x040200
#define BNX2X_MULTI_QUEUE
diff --git a/drivers/net/bnx2x/bnx2x_cmn.c b/drivers/net/bnx2x/bnx2x_cmn.c
index 459614d2d7bc..94d5f59d5a6f 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/bnx2x/bnx2x_cmn.c
@@ -1680,7 +1680,7 @@ static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
rc = XMIT_PLAIN;
else {
- if (skb->protocol == htons(ETH_P_IPV6)) {
+ if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) {
rc = XMIT_CSUM_V6;
if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
rc |= XMIT_CSUM_TCP;
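For a VLAN-tagged frame, skb->protocol carries the 802.1Q ethertype rather than the encapsulated protocol, so the old test could never see IPv6 inside a tag; vlan_get_protocol() reads the ethertype behind the tag. The lookup reduced to its essentials (single-tagged case, hand-built values):

    /* vlan_proto_demo.c - outer vs. effective ethertype on a tagged frame. */
    #include <stdio.h>
    #include <stdint.h>

    #define ETH_P_8021Q 0x8100
    #define ETH_P_IPV6  0x86DD

    static uint16_t effective_proto(uint16_t outer, uint16_t inner)
    {
            return outer == ETH_P_8021Q ? inner : outer;
    }

    int main(void)
    {
            uint16_t outer = ETH_P_8021Q, inner = ETH_P_IPV6;
            printf("outer == IPv6? %s\n", outer == ETH_P_IPV6 ? "yes" : "no");
            printf("effective: 0x%04x\n", effective_proto(outer, inner)); /* 0x86dd */
            return 0;
    }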
diff --git a/drivers/net/bnx2x/bnx2x_hsi.h b/drivers/net/bnx2x/bnx2x_hsi.h
index 18c8e23a0e82..4cfd4e9b5586 100644
--- a/drivers/net/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/bnx2x/bnx2x_hsi.h
@@ -244,7 +244,14 @@ struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */
u16 xgxs_config_tx[4]; /* 0x1A0 */
- u32 Reserved1[57]; /* 0x1A8 */
+ u32 Reserved1[56]; /* 0x1A8 */
+ u32 default_cfg; /* 0x288 */
+ /* Enable BAM on KR */
+#define PORT_HW_CFG_ENABLE_BAM_ON_KR_MASK 0x00100000
+#define PORT_HW_CFG_ENABLE_BAM_ON_KR_SHIFT 20
+#define PORT_HW_CFG_ENABLE_BAM_ON_KR_DISABLED 0x00000000
+#define PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED 0x00100000
+
u32 speed_capability_mask2; /* 0x28C */
#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_MASK 0x0000FFFF
#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_SHIFT 0
diff --git a/drivers/net/bnx2x/bnx2x_link.c b/drivers/net/bnx2x/bnx2x_link.c
index 2326774df843..38aeffef2a83 100644
--- a/drivers/net/bnx2x/bnx2x_link.c
+++ b/drivers/net/bnx2x/bnx2x_link.c
@@ -610,7 +610,7 @@ static u8 bnx2x_bmac_enable(struct link_params *params,
/* reset and unreset the BigMac */
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
(MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
- udelay(10);
+ msleep(1);
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
(MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
@@ -3525,13 +3525,19 @@ static u8 bnx2x_8073_config_init(struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK, "Before rom RX_ALARM(port1): 0x%x\n", tmp1);
/* Enable CL37 BAM */
- bnx2x_cl45_read(bp, phy,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_8073_BAM, &val);
- bnx2x_cl45_write(bp, phy,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_8073_BAM, val | 1);
+ if (REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region, dev_info.
+ port_hw_config[params->port].default_cfg)) &
+ PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED) {
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_8073_BAM, &val);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_8073_BAM, val | 1);
+ DP(NETIF_MSG_LINK, "Enable CL37 BAM on KR\n");
+ }
if (params->loopback_mode == LOOPBACK_EXT) {
bnx2x_807x_force_10G(bp, phy);
DP(NETIF_MSG_LINK, "Forced speed 10G on 807X\n");
@@ -3898,7 +3904,7 @@ static u8 bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy,
MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE)
- return 0;;
+ return 0;
msleep(1);
}
return -EINVAL;
@@ -3982,7 +3988,7 @@ static u8 bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE)
- return 0;;
+ return 0;
msleep(1);
}
@@ -5302,7 +5308,7 @@ static u8 bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
{
struct bnx2x *bp = params->bp;
u16 autoneg_val, an_1000_val, an_10_100_val;
- bnx2x_wait_reset_complete(bp, phy);
+
bnx2x_bits_en(bp, NIG_REG_LATCH_BC_0 + params->port*4,
1 << NIG_LATCH_BC_ENABLE_MI_INT);
@@ -5431,6 +5437,7 @@ static u8 bnx2x_8481_config_init(struct bnx2x_phy *phy,
/* HW reset */
bnx2x_ext_phy_hw_reset(bp, params->port);
+ bnx2x_wait_reset_complete(bp, phy);
bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15);
return bnx2x_848xx_cmn_config_init(phy, params, vars);
@@ -5441,7 +5448,7 @@ static u8 bnx2x_848x3_config_init(struct bnx2x_phy *phy,
struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
- u8 port = params->port, initialize = 1;
+ u8 port, initialize = 1;
u16 val;
u16 temp;
u32 actual_phy_selection;
@@ -5450,11 +5457,16 @@ static u8 bnx2x_848x3_config_init(struct bnx2x_phy *phy,
/* This is just for MDIO_CTL_REG_84823_MEDIA register. */
msleep(1);
+ if (CHIP_IS_E2(bp))
+ port = BP_PATH(bp);
+ else
+ port = params->port;
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
MISC_REGISTERS_GPIO_OUTPUT_HIGH,
port);
- msleep(200); /* 100 is not enough */
-
+ bnx2x_wait_reset_complete(bp, phy);
+ /* Wait for GPHY to come out of reset */
+ msleep(50);
/* BCM84823 requires that XGXS links up first @ 10G for normal
behavior */
temp = vars->line_speed;
@@ -5625,7 +5637,11 @@ static void bnx2x_848x3_link_reset(struct bnx2x_phy *phy,
struct link_params *params)
{
struct bnx2x *bp = params->bp;
- u8 port = params->port;
+ u8 port;
+ if (CHIP_IS_E2(bp))
+ port = BP_PATH(bp);
+ else
+ port = params->port;
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
MISC_REGISTERS_GPIO_OUTPUT_LOW,
port);
@@ -6928,7 +6944,7 @@ u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
u8 reset_ext_phy)
{
struct bnx2x *bp = params->bp;
- u8 phy_index, port = params->port;
+ u8 phy_index, port = params->port, clear_latch_ind = 0;
DP(NETIF_MSG_LINK, "Resetting the link of port %d\n", port);
/* disable attentions */
vars->link_status = 0;
@@ -6966,9 +6982,18 @@ u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
params->phy[phy_index].link_reset(
&params->phy[phy_index],
params);
+ if (params->phy[phy_index].flags &
+ FLAGS_REARM_LATCH_SIGNAL)
+ clear_latch_ind = 1;
}
}
+ if (clear_latch_ind) {
+ /* Clear latching indication */
+ bnx2x_rearm_latch_signal(bp, port, 0);
+ bnx2x_bits_dis(bp, NIG_REG_LATCH_BC_0 + port*4,
+ 1 << NIG_LATCH_BC_ENABLE_MI_INT);
+ }
if (params->phy[INT_PHY].link_reset)
params->phy[INT_PHY].link_reset(
&params->phy[INT_PHY], params);
@@ -6999,6 +7024,7 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp,
s8 port;
s8 port_of_path = 0;
+ bnx2x_ext_phy_hw_reset(bp, 0);
/* PART1 - Reset both phys */
for (port = PORT_MAX - 1; port >= PORT_0; port--) {
u32 shmem_base, shmem2_base;
@@ -7021,7 +7047,8 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp,
return -EINVAL;
}
/* disable attentions */
- bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
+ bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 +
+ port_of_path*4,
(NIG_MASK_XGXS0_LINK_STATUS |
NIG_MASK_XGXS0_LINK10G |
NIG_MASK_SERDES0_LINK_STATUS |
@@ -7132,7 +7159,7 @@ static u8 bnx2x_8726_common_init_phy(struct bnx2x *bp,
(1<<(MISC_REGISTERS_GPIO_3 + MISC_REGISTERS_GPIO_PORT_SHIFT)));
REG_WR(bp, MISC_REG_GPIO_EVENT_EN, val);
- bnx2x_ext_phy_hw_reset(bp, 1);
+ bnx2x_ext_phy_hw_reset(bp, 0);
msleep(5);
for (port = 0; port < PORT_MAX; port++) {
u32 shmem_base, shmem2_base;
diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c
index e9ad16f00b56..92057d7058da 100644
--- a/drivers/net/bnx2x/bnx2x_main.c
+++ b/drivers/net/bnx2x/bnx2x_main.c
@@ -8078,7 +8078,7 @@ static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
int port = BP_PORT(bp);
u32 val, val2;
u32 config;
- u32 ext_phy_type, ext_phy_config;;
+ u32 ext_phy_type, ext_phy_config;
bp->link_params.bp = bp;
bp->link_params.port = port;
@@ -9064,7 +9064,7 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
default:
pr_err("Unknown board_type (%ld), aborting\n",
ent->driver_data);
- return ENODEV;
+ return -ENODEV;
}
cid_count += CNIC_CONTEXT_USE;
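The bnx2x_init_one hunk fixes a sign bug: PCI probe routines signal failure with a negative errno, and a bare ENODEV (positive 19) sails past any caller that tests ret < 0. A short demonstration:

    /* errno_sign_demo.c - positive errno is invisible to a ret < 0 check. */
    #include <stdio.h>
    #include <errno.h>

    static int probe(int buggy) { return buggy ? ENODEV : -ENODEV; }

    int main(void)
    {
            int ret = probe(1);
            printf("ENODEV : %d -> error? %s\n", ret, ret < 0 ? "yes" : "no");
            ret = probe(0);
            printf("-ENODEV: %d -> error? %s\n", ret, ret < 0 ? "yes" : "no");
            return 0;
    }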
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index 881914bc4e9c..48cf24ff4e6f 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -2474,8 +2474,7 @@ int bond_3ad_lacpdu_recv(struct sk_buff *skb, struct net_device *dev, struct pac
goto out;
read_lock(&bond->lock);
- slave = bond_get_slave_by_dev((struct bonding *)netdev_priv(dev),
- orig_dev);
+ slave = bond_get_slave_by_dev(netdev_priv(dev), orig_dev);
if (!slave)
goto out_unlock;
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index bdb68a600382..0273ad0b57bb 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -873,15 +873,11 @@ static void bond_mc_del(struct bonding *bond, void *addr)
static void __bond_resend_igmp_join_requests(struct net_device *dev)
{
struct in_device *in_dev;
- struct ip_mc_list *im;
rcu_read_lock();
in_dev = __in_dev_get_rcu(dev);
- if (in_dev) {
- for (im = in_dev->mc_list; im; im = im->next)
- ip_mc_rejoin_group(im);
- }
-
+ if (in_dev)
+ ip_mc_rejoin_groups(in_dev);
rcu_read_unlock();
}
@@ -3209,7 +3205,7 @@ out:
#ifdef CONFIG_PROC_FS
static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos)
- __acquires(&dev_base_lock)
+ __acquires(RCU)
__acquires(&bond->lock)
{
struct bonding *bond = seq->private;
@@ -3218,7 +3214,7 @@ static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos)
int i;
/* make sure the bond won't be taken away */
- read_lock(&dev_base_lock);
+ rcu_read_lock();
read_lock(&bond->lock);
if (*pos == 0)
@@ -3248,12 +3244,12 @@ static void *bond_info_seq_next(struct seq_file *seq, void *v, loff_t *pos)
static void bond_info_seq_stop(struct seq_file *seq, void *v)
__releases(&bond->lock)
- __releases(&dev_base_lock)
+ __releases(RCU)
{
struct bonding *bond = seq->private;
read_unlock(&bond->lock);
- read_unlock(&dev_base_lock);
+ rcu_read_unlock();
}
static void bond_info_show_master(struct seq_file *seq)
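The bonding /proc handlers above switch from read_lock(&dev_base_lock) to an RCU read-side section: rcu_read_lock() is enough to keep the device list from disappearing while the seq_file walks it, without the contention of a global rwlock. A kernel-style sketch of the reader side (builds only in-kernel; bond_stub is a stand-in, not the real struct):

    /* rcu_reader_sketch.c - the reader pattern used above; illustrative,
     * not the actual bonding code. */
    #include <linux/rcupdate.h>

    struct bond_stub { int slave_cnt; };

    static int show_bond(struct bond_stub *bond)
    {
            int cnt;

            rcu_read_lock();        /* pin the read-side critical section */
            cnt = bond->slave_cnt;  /* protected structures stay alive here */
            rcu_read_unlock();      /* no writer exclusion needed or taken */
            return cnt;
    }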
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index 4eedb12df6ca..ad3ae46a4c01 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -286,7 +286,7 @@ static inline struct bonding *bond_get_bond_by_slave(struct slave *slave)
return NULL;
}
- return (struct bonding *)netdev_priv(slave->dev->master);
+ return netdev_priv(slave->dev->master);
}
static inline bool bond_is_lb(const struct bonding *bond)
diff --git a/drivers/net/caif/caif_spi.c b/drivers/net/caif/caif_spi.c
index 8427533fe313..20da1996d354 100644
--- a/drivers/net/caif/caif_spi.c
+++ b/drivers/net/caif/caif_spi.c
@@ -33,6 +33,9 @@ MODULE_LICENSE("GPL");
MODULE_AUTHOR("Daniel Martensson<daniel.martensson@stericsson.com>");
MODULE_DESCRIPTION("CAIF SPI driver");
+/* Returns the number of padding bytes for alignment. */
+#define PAD_POW2(x, pow) ((((x)&((pow)-1))==0) ? 0 : (((pow)-((x)&((pow)-1)))))
+
static int spi_loop;
module_param(spi_loop, bool, S_IRUGO);
MODULE_PARM_DESC(spi_loop, "SPI running in loopback mode.");
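PAD_POW2(x, pow) returns the number of bytes to add to x to reach the next multiple of pow; because it masks with pow - 1, pow must be a nonzero power of two, exactly as the new comments below warn. A runnable check of the arithmetic:

    /* pad_pow2_demo.c - verify the padding macro on a few cases. */
    #include <assert.h>
    #include <stdio.h>

    #define PAD_POW2(x, pow) \
            ((((x) & ((pow) - 1)) == 0) ? 0 : ((pow) - ((x) & ((pow) - 1))))

    int main(void)
    {
            assert(PAD_POW2(8, 4) == 0);   /* already aligned */
            assert(PAD_POW2(5, 4) == 3);   /* 5 + 3 = 8 */
            assert(PAD_POW2(7, 2) == 1);   /* 7 + 1 = 8 */
            /* pow = 3 would mask with 0b10 and misalign - hence the
             * power-of-two requirement. */
            printf("PAD_POW2 checks passed\n");
            return 0;
    }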
@@ -41,7 +44,10 @@ MODULE_PARM_DESC(spi_loop, "SPI running in loopback mode.");
module_param(spi_frm_align, int, S_IRUGO);
MODULE_PARM_DESC(spi_frm_align, "SPI frame alignment.");
-/* SPI padding options. */
+/*
+ * SPI padding options.
+ * Warning: must be a base of 2 (& operation used) and can not be zero !
+ */
module_param(spi_up_head_align, int, S_IRUGO);
MODULE_PARM_DESC(spi_up_head_align, "SPI uplink head alignment.");
@@ -240,15 +246,13 @@ static ssize_t dbgfs_frame(struct file *file, char __user *user_buf,
static const struct file_operations dbgfs_state_fops = {
.open = dbgfs_open,
.read = dbgfs_state,
- .owner = THIS_MODULE,
- .llseek = default_llseek,
+ .owner = THIS_MODULE
};
static const struct file_operations dbgfs_frame_fops = {
.open = dbgfs_open,
.read = dbgfs_frame,
- .owner = THIS_MODULE,
- .llseek = default_llseek,
+ .owner = THIS_MODULE
};
static inline void dev_debugfs_add(struct cfspi *cfspi)
@@ -337,6 +341,9 @@ int cfspi_xmitfrm(struct cfspi *cfspi, u8 *buf, size_t len)
u8 *dst = buf;
caif_assert(buf);
+ if (cfspi->slave && !cfspi->slave_talked)
+ cfspi->slave_talked = true;
+
do {
struct sk_buff *skb;
struct caif_payload_info *info;
@@ -357,8 +364,8 @@ int cfspi_xmitfrm(struct cfspi *cfspi, u8 *buf, size_t len)
* Compute head offset i.e. number of bytes to add to
* get the start of the payload aligned.
*/
- if (spi_up_head_align) {
- spad = 1 + ((info->hdr_len + 1) & spi_up_head_align);
+ if (spi_up_head_align > 1) {
+ spad = 1 + PAD_POW2((info->hdr_len + 1), spi_up_head_align);
*dst = (u8)(spad - 1);
dst += spad;
}
@@ -373,7 +380,7 @@ int cfspi_xmitfrm(struct cfspi *cfspi, u8 *buf, size_t len)
* Compute tail offset i.e. number of bytes to add to
* get the complete CAIF frame aligned.
*/
- epad = (skb->len + spad) & spi_up_tail_align;
+ epad = PAD_POW2((skb->len + spad), spi_up_tail_align);
dst += epad;
dev_kfree_skb(skb);
@@ -417,14 +424,14 @@ int cfspi_xmitlen(struct cfspi *cfspi)
* Compute head offset i.e. number of bytes to add to
* get the start of the payload aligned.
*/
- if (spi_up_head_align)
- spad = 1 + ((info->hdr_len + 1) & spi_up_head_align);
+ if (spi_up_head_align > 1)
+ spad = 1 + PAD_POW2((info->hdr_len + 1), spi_up_head_align);
/*
* Compute tail offset i.e. number of bytes to add to
* get the complete CAIF frame aligned.
*/
- epad = (skb->len + spad) & spi_up_tail_align;
+ epad = PAD_POW2((skb->len + spad), spi_up_tail_align);
if ((skb->len + spad + epad + frm_len) <= CAIF_MAX_SPI_FRAME) {
skb_queue_tail(&cfspi->chead, skb);
@@ -433,6 +440,7 @@ int cfspi_xmitlen(struct cfspi *cfspi)
} else {
/* Put back packet. */
skb_queue_head(&cfspi->qhead, skb);
+ break;
}
} while (pkts <= CAIF_MAX_SPI_PKTS);
@@ -453,6 +461,15 @@ static void cfspi_ss_cb(bool assert, struct cfspi_ifc *ifc)
{
struct cfspi *cfspi = (struct cfspi *)ifc->priv;
+ /*
+ * The slave device is the master on the link. Interrupts before the
+ * slave has transmitted are considered spurious.
+ */
+ if (cfspi->slave && !cfspi->slave_talked) {
+ printk(KERN_WARNING "CFSPI: Spurious SS interrupt.\n");
+ return;
+ }
+
if (!in_interrupt())
spin_lock(&cfspi->lock);
if (assert) {
@@ -465,7 +482,8 @@ static void cfspi_ss_cb(bool assert, struct cfspi_ifc *ifc)
spin_unlock(&cfspi->lock);
/* Wake up the xfer thread. */
- wake_up_interruptible(&cfspi->wait);
+ if (assert)
+ wake_up_interruptible(&cfspi->wait);
}
static void cfspi_xfer_done_cb(struct cfspi_ifc *ifc)
@@ -523,7 +541,7 @@ int cfspi_rxfrm(struct cfspi *cfspi, u8 *buf, size_t len)
* Compute head offset i.e. number of bytes added to
* get the start of the payload aligned.
*/
- if (spi_down_head_align) {
+ if (spi_down_head_align > 1) {
spad = 1 + *src;
src += spad;
}
@@ -564,7 +582,7 @@ int cfspi_rxfrm(struct cfspi *cfspi, u8 *buf, size_t len)
* Compute tail offset i.e. number of bytes added to
* get the complete CAIF frame aligned.
*/
- epad = (pkt_len + spad) & spi_down_tail_align;
+ epad = PAD_POW2((pkt_len + spad), spi_down_tail_align);
src += epad;
} while ((src - buf) < len);
@@ -617,19 +635,28 @@ int cfspi_spi_probe(struct platform_device *pdev)
ndev = alloc_netdev(sizeof(struct cfspi),
"cfspi%d", cfspi_setup);
- if (!dev)
- return -ENODEV;
+ if (!ndev)
+ return -ENOMEM;
cfspi = netdev_priv(ndev);
netif_stop_queue(ndev);
cfspi->ndev = ndev;
cfspi->pdev = pdev;
- /* Set flow info */
+ /* Set flow info. */
cfspi->flow_off_sent = 0;
cfspi->qd_low_mark = LOW_WATER_MARK;
cfspi->qd_high_mark = HIGH_WATER_MARK;
+ /* Set slave info. */
+ if (!strncmp(cfspi_spi_driver.driver.name, "cfspi_sspi", 10)) {
+ cfspi->slave = true;
+ cfspi->slave_talked = false;
+ } else {
+ cfspi->slave = false;
+ cfspi->slave_talked = false;
+ }
+
/* Assign the SPI device. */
cfspi->dev = dev;
/* Assign the device ifc to this SPI interface. */
diff --git a/drivers/net/caif/caif_spi_slave.c b/drivers/net/caif/caif_spi_slave.c
index 2111dbfea6fe..1b9943a4edab 100644
--- a/drivers/net/caif/caif_spi_slave.c
+++ b/drivers/net/caif/caif_spi_slave.c
@@ -36,10 +36,15 @@ static inline int forward_to_spi_cmd(struct cfspi *cfspi)
#endif
int spi_frm_align = 2;
-int spi_up_head_align = 1;
-int spi_up_tail_align;
-int spi_down_head_align = 3;
-int spi_down_tail_align = 1;
+
+/*
+ * SPI padding options.
+ * Warning: must be a base of 2 (& operation used) and can not be zero !
+ */
+int spi_up_head_align = 1 << 1;
+int spi_up_tail_align = 1 << 0;
+int spi_down_head_align = 1 << 2;
+int spi_down_tail_align = 1 << 1;
#ifdef CONFIG_DEBUG_FS
static inline void debugfs_store_prev(struct cfspi *cfspi)
diff --git a/drivers/net/can/mscan/mscan.c b/drivers/net/can/mscan/mscan.c
index 64c378cd0c34..74cd880c7e06 100644
--- a/drivers/net/can/mscan/mscan.c
+++ b/drivers/net/can/mscan/mscan.c
@@ -182,7 +182,7 @@ static int mscan_restart(struct net_device *dev)
priv->can.state = CAN_STATE_ERROR_ACTIVE;
WARN(!(in_8(&regs->canmisc) & MSCAN_BOHOLD),
- "bus-off state expected");
+ "bus-off state expected\n");
out_8(&regs->canmisc, MSCAN_BOHOLD);
/* Re-enable receive interrupts. */
out_8(&regs->canrier, MSCAN_RX_INTS_ENABLE);
diff --git a/drivers/net/can/pch_can.c b/drivers/net/can/pch_can.c
index 672718261c68..238622a04bc1 100644
--- a/drivers/net/can/pch_can.c
+++ b/drivers/net/can/pch_can.c
@@ -32,58 +32,57 @@
#include <linux/can/dev.h>
#include <linux/can/error.h>
-#define MAX_MSG_OBJ 32
-#define MSG_OBJ_RX 0 /* The receive message object flag. */
-#define MSG_OBJ_TX 1 /* The transmit message object flag. */
-
-#define ENABLE 1 /* The enable flag */
-#define DISABLE 0 /* The disable flag */
-#define CAN_CTRL_INIT 0x0001 /* The INIT bit of CANCONT register. */
-#define CAN_CTRL_IE 0x0002 /* The IE bit of CAN control register */
-#define CAN_CTRL_IE_SIE_EIE 0x000e
-#define CAN_CTRL_CCE 0x0040
-#define CAN_CTRL_OPT 0x0080 /* The OPT bit of CANCONT register. */
-#define CAN_OPT_SILENT 0x0008 /* The Silent bit of CANOPT reg. */
-#define CAN_OPT_LBACK 0x0010 /* The LoopBack bit of CANOPT reg. */
-#define CAN_CMASK_RX_TX_SET 0x00f3
-#define CAN_CMASK_RX_TX_GET 0x0073
-#define CAN_CMASK_ALL 0xff
-#define CAN_CMASK_RDWR 0x80
-#define CAN_CMASK_ARB 0x20
-#define CAN_CMASK_CTRL 0x10
-#define CAN_CMASK_MASK 0x40
-#define CAN_CMASK_NEWDAT 0x04
-#define CAN_CMASK_CLRINTPND 0x08
-
-#define CAN_IF_MCONT_NEWDAT 0x8000
-#define CAN_IF_MCONT_INTPND 0x2000
-#define CAN_IF_MCONT_UMASK 0x1000
-#define CAN_IF_MCONT_TXIE 0x0800
-#define CAN_IF_MCONT_RXIE 0x0400
-#define CAN_IF_MCONT_RMTEN 0x0200
-#define CAN_IF_MCONT_TXRQXT 0x0100
-#define CAN_IF_MCONT_EOB 0x0080
-#define CAN_IF_MCONT_DLC 0x000f
-#define CAN_IF_MCONT_MSGLOST 0x4000
-#define CAN_MASK2_MDIR_MXTD 0xc000
-#define CAN_ID2_DIR 0x2000
-#define CAN_ID_MSGVAL 0x8000
-
-#define CAN_STATUS_INT 0x8000
-#define CAN_IF_CREQ_BUSY 0x8000
-#define CAN_ID2_XTD 0x4000
-
-#define CAN_REC 0x00007f00
-#define CAN_TEC 0x000000ff
-
-#define PCH_RX_OK 0x00000010
-#define PCH_TX_OK 0x00000008
-#define PCH_BUS_OFF 0x00000080
-#define PCH_EWARN 0x00000040
-#define PCH_EPASSIV 0x00000020
-#define PCH_LEC0 0x00000001
-#define PCH_LEC1 0x00000002
-#define PCH_LEC2 0x00000004
+#define PCH_MAX_MSG_OBJ 32
+#define PCH_MSG_OBJ_RX 0 /* The receive message object flag. */
+#define PCH_MSG_OBJ_TX 1 /* The transmit message object flag. */
+
+#define PCH_ENABLE 1 /* The enable flag */
+#define PCH_DISABLE 0 /* The disable flag */
+#define PCH_CTRL_INIT BIT(0) /* The INIT bit of CANCONT register. */
+#define PCH_CTRL_IE BIT(1) /* The IE bit of CAN control register */
+#define PCH_CTRL_IE_SIE_EIE (BIT(3) | BIT(2) | BIT(1))
+#define PCH_CTRL_CCE BIT(6)
+#define PCH_CTRL_OPT BIT(7) /* The OPT bit of CANCONT register. */
+#define PCH_OPT_SILENT BIT(3) /* The Silent bit of CANOPT reg. */
+#define PCH_OPT_LBACK BIT(4) /* The LoopBack bit of CANOPT reg. */
+
+#define PCH_CMASK_RX_TX_SET 0x00f3
+#define PCH_CMASK_RX_TX_GET 0x0073
+#define PCH_CMASK_ALL 0xff
+#define PCH_CMASK_NEWDAT BIT(2)
+#define PCH_CMASK_CLRINTPND BIT(3)
+#define PCH_CMASK_CTRL BIT(4)
+#define PCH_CMASK_ARB BIT(5)
+#define PCH_CMASK_MASK BIT(6)
+#define PCH_CMASK_RDWR BIT(7)
+#define PCH_IF_MCONT_NEWDAT BIT(15)
+#define PCH_IF_MCONT_MSGLOST BIT(14)
+#define PCH_IF_MCONT_INTPND BIT(13)
+#define PCH_IF_MCONT_UMASK BIT(12)
+#define PCH_IF_MCONT_TXIE BIT(11)
+#define PCH_IF_MCONT_RXIE BIT(10)
+#define PCH_IF_MCONT_RMTEN BIT(9)
+#define PCH_IF_MCONT_TXRQXT BIT(8)
+#define PCH_IF_MCONT_EOB BIT(7)
+#define PCH_IF_MCONT_DLC (BIT(0) | BIT(1) | BIT(2) | BIT(3))
+#define PCH_MASK2_MDIR_MXTD (BIT(14) | BIT(15))
+#define PCH_ID2_DIR BIT(13)
+#define PCH_ID2_XTD BIT(14)
+#define PCH_ID_MSGVAL BIT(15)
+#define PCH_IF_CREQ_BUSY BIT(15)
+
+#define PCH_STATUS_INT 0x8000
+#define PCH_REC 0x00007f00
+#define PCH_TEC 0x000000ff
+
+#define PCH_TX_OK BIT(3)
+#define PCH_RX_OK BIT(4)
+#define PCH_EPASSIV BIT(5)
+#define PCH_EWARN BIT(6)
+#define PCH_BUS_OFF BIT(7)
+#define PCH_LEC0 BIT(0)
+#define PCH_LEC1 BIT(1)
+#define PCH_LEC2 BIT(2)
#define PCH_LEC_ALL (PCH_LEC0 | PCH_LEC1 | PCH_LEC2)
#define PCH_STUF_ERR PCH_LEC0
#define PCH_FORM_ERR PCH_LEC1
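Besides moving every constant into a PCH_ namespace, this hunk rewrites the single-bit fields with the kernel's BIT() macro so the bit position is explicit. The renames are value-preserving, which a quick check confirms (BIT defined locally the way linux/bits.h does):

    /* bit_macro_demo.c - the BIT() rewrites keep the old values. */
    #include <assert.h>
    #include <stdio.h>

    #define BIT(nr) (1UL << (nr))

    int main(void)
    {
            assert(BIT(15) == 0x8000);                    /* ..._MCONT_NEWDAT */
            assert((BIT(3) | BIT(2) | BIT(1)) == 0x000e); /* ..._IE_SIE_EIE */
            assert((BIT(0) | BIT(1) | BIT(2) | BIT(3)) == 0x000f); /* ..._DLC */
            assert((BIT(14) | BIT(15)) == 0xc000);        /* ..._MDIR_MXTD */
            printf("BIT() equivalences hold\n");
            return 0;
    }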
@@ -93,26 +92,15 @@
#define PCH_CRC_ERR (PCH_LEC1 | PCH_LEC2)
/* bit position of certain controller bits. */
-#define BIT_BITT_BRP 0
-#define BIT_BITT_SJW 6
-#define BIT_BITT_TSEG1 8
-#define BIT_BITT_TSEG2 12
-#define BIT_IF1_MCONT_RXIE 10
-#define BIT_IF2_MCONT_TXIE 11
-#define BIT_BRPE_BRPE 6
-#define BIT_ES_TXERRCNT 0
-#define BIT_ES_RXERRCNT 8
-#define MSK_BITT_BRP 0x3f
-#define MSK_BITT_SJW 0xc0
-#define MSK_BITT_TSEG1 0xf00
-#define MSK_BITT_TSEG2 0x7000
-#define MSK_BRPE_BRPE 0x3c0
-#define MSK_BRPE_GET 0x0f
-#define MSK_CTRL_IE_SIE_EIE 0x07
-#define MSK_MCONT_TXIE 0x08
-#define MSK_MCONT_RXIE 0x10
-#define PCH_CAN_NO_TX_BUFF 1
-#define COUNTER_LIMIT 10
+#define PCH_BIT_BRP 0
+#define PCH_BIT_SJW 6
+#define PCH_BIT_TSEG1 8
+#define PCH_BIT_TSEG2 12
+#define PCH_BIT_BRPE_BRPE 6
+#define PCH_MSK_BITT_BRP 0x3f
+#define PCH_MSK_BRPE_BRPE 0x3c0
+#define PCH_MSK_CTRL_IE_SIE_EIE 0x07
+#define PCH_COUNTER_LIMIT 10
#define PCH_CAN_CLK 50000000 /* 50MHz */
@@ -181,14 +169,14 @@ struct pch_can_priv {
struct can_priv can;
unsigned int can_num;
struct pci_dev *dev;
- unsigned int tx_enable[MAX_MSG_OBJ];
- unsigned int rx_enable[MAX_MSG_OBJ];
- unsigned int rx_link[MAX_MSG_OBJ];
+ unsigned int tx_enable[PCH_MAX_MSG_OBJ];
+ unsigned int rx_enable[PCH_MAX_MSG_OBJ];
+ unsigned int rx_link[PCH_MAX_MSG_OBJ];
unsigned int int_enables;
unsigned int int_stat;
struct net_device *ndev;
spinlock_t msgif_reg_lock; /* Message Interface Registers Access Lock*/
- unsigned int msg_obj[MAX_MSG_OBJ];
+ unsigned int msg_obj[PCH_MAX_MSG_OBJ];
struct pch_can_regs __iomem *regs;
struct napi_struct napi;
unsigned int tx_obj; /* Point next Tx Obj index */
@@ -228,11 +216,11 @@ static void pch_can_set_run_mode(struct pch_can_priv *priv,
{
switch (mode) {
case PCH_CAN_RUN:
- pch_can_bit_clear(&priv->regs->cont, CAN_CTRL_INIT);
+ pch_can_bit_clear(&priv->regs->cont, PCH_CTRL_INIT);
break;
case PCH_CAN_STOP:
- pch_can_bit_set(&priv->regs->cont, CAN_CTRL_INIT);
+ pch_can_bit_set(&priv->regs->cont, PCH_CTRL_INIT);
break;
default:
@@ -246,30 +234,30 @@ static void pch_can_set_optmode(struct pch_can_priv *priv)
u32 reg_val = ioread32(&priv->regs->opt);
if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
- reg_val |= CAN_OPT_SILENT;
+ reg_val |= PCH_OPT_SILENT;
if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
- reg_val |= CAN_OPT_LBACK;
+ reg_val |= PCH_OPT_LBACK;
- pch_can_bit_set(&priv->regs->cont, CAN_CTRL_OPT);
+ pch_can_bit_set(&priv->regs->cont, PCH_CTRL_OPT);
iowrite32(reg_val, &priv->regs->opt);
}
static void pch_can_set_int_custom(struct pch_can_priv *priv)
{
/* Clearing the IE, SIE and EIE bits of Can control register. */
- pch_can_bit_clear(&priv->regs->cont, CAN_CTRL_IE_SIE_EIE);
+ pch_can_bit_clear(&priv->regs->cont, PCH_CTRL_IE_SIE_EIE);
/* Appropriately setting them. */
pch_can_bit_set(&priv->regs->cont,
- ((priv->int_enables & MSK_CTRL_IE_SIE_EIE) << 1));
+ ((priv->int_enables & PCH_MSK_CTRL_IE_SIE_EIE) << 1));
}
/* This function retrieves interrupt enabled for the CAN device. */
static void pch_can_get_int_enables(struct pch_can_priv *priv, u32 *enables)
{
/* Obtaining the status of IE, SIE and EIE interrupt bits. */
- *enables = ((ioread32(&priv->regs->cont) & CAN_CTRL_IE_SIE_EIE) >> 1);
+ *enables = ((ioread32(&priv->regs->cont) & PCH_CTRL_IE_SIE_EIE) >> 1);
}
static void pch_can_set_int_enables(struct pch_can_priv *priv,
@@ -277,19 +265,19 @@ static void pch_can_set_int_enables(struct pch_can_priv *priv,
{
switch (interrupt_no) {
case PCH_CAN_ENABLE:
- pch_can_bit_set(&priv->regs->cont, CAN_CTRL_IE);
+ pch_can_bit_set(&priv->regs->cont, PCH_CTRL_IE);
break;
case PCH_CAN_DISABLE:
- pch_can_bit_clear(&priv->regs->cont, CAN_CTRL_IE);
+ pch_can_bit_clear(&priv->regs->cont, PCH_CTRL_IE);
break;
case PCH_CAN_ALL:
- pch_can_bit_set(&priv->regs->cont, CAN_CTRL_IE_SIE_EIE);
+ pch_can_bit_set(&priv->regs->cont, PCH_CTRL_IE_SIE_EIE);
break;
case PCH_CAN_NONE:
- pch_can_bit_clear(&priv->regs->cont, CAN_CTRL_IE_SIE_EIE);
+ pch_can_bit_clear(&priv->regs->cont, PCH_CTRL_IE_SIE_EIE);
break;
default:
@@ -300,12 +288,12 @@ static void pch_can_set_int_enables(struct pch_can_priv *priv,
static void pch_can_check_if_busy(u32 __iomem *creq_addr, u32 num)
{
- u32 counter = COUNTER_LIMIT;
+ u32 counter = PCH_COUNTER_LIMIT;
u32 ifx_creq;
iowrite32(num, creq_addr);
while (counter) {
- ifx_creq = ioread32(creq_addr) & CAN_IF_CREQ_BUSY;
+ ifx_creq = ioread32(creq_addr) & PCH_IF_CREQ_BUSY;
if (!ifx_creq)
break;
counter--;
@@ -322,22 +310,22 @@ static void pch_can_set_rx_enable(struct pch_can_priv *priv, u32 buff_num,
spin_lock_irqsave(&priv->msgif_reg_lock, flags);
/* Reading the receive buffer data from RAM to Interface1 registers */
- iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if1_cmask);
+ iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->if1_cmask);
pch_can_check_if_busy(&priv->regs->if1_creq, buff_num);
/* Setting the IF1MASK1 register to access MsgVal and RxIE bits */
- iowrite32(CAN_CMASK_RDWR | CAN_CMASK_ARB | CAN_CMASK_CTRL,
+ iowrite32(PCH_CMASK_RDWR | PCH_CMASK_ARB | PCH_CMASK_CTRL,
&priv->regs->if1_cmask);
- if (set == ENABLE) {
+ if (set == PCH_ENABLE) {
/* Setting the MsgVal and RxIE bits */
- pch_can_bit_set(&priv->regs->if1_mcont, CAN_IF_MCONT_RXIE);
- pch_can_bit_set(&priv->regs->if1_id2, CAN_ID_MSGVAL);
+ pch_can_bit_set(&priv->regs->if1_mcont, PCH_IF_MCONT_RXIE);
+ pch_can_bit_set(&priv->regs->if1_id2, PCH_ID_MSGVAL);
- } else if (set == DISABLE) {
+ } else if (set == PCH_DISABLE) {
/* Resetting the MsgVal and RxIE bits */
- pch_can_bit_clear(&priv->regs->if1_mcont, CAN_IF_MCONT_RXIE);
- pch_can_bit_clear(&priv->regs->if1_id2, CAN_ID_MSGVAL);
+ pch_can_bit_clear(&priv->regs->if1_mcont, PCH_IF_MCONT_RXIE);
+ pch_can_bit_clear(&priv->regs->if1_id2, PCH_ID_MSGVAL);
}
pch_can_check_if_busy(&priv->regs->if1_creq, buff_num);
@@ -350,8 +338,8 @@ static void pch_can_rx_enable_all(struct pch_can_priv *priv)
/* Traversing to obtain the object configured as receivers. */
for (i = 0; i < PCH_OBJ_NUM; i++) {
- if (priv->msg_obj[i] == MSG_OBJ_RX)
- pch_can_set_rx_enable(priv, i + 1, ENABLE);
+ if (priv->msg_obj[i] == PCH_MSG_OBJ_RX)
+ pch_can_set_rx_enable(priv, i + 1, PCH_ENABLE);
}
}
@@ -361,8 +349,8 @@ static void pch_can_rx_disable_all(struct pch_can_priv *priv)
/* Traversing to obtain the object configured as receivers. */
for (i = 0; i < PCH_OBJ_NUM; i++) {
- if (priv->msg_obj[i] == MSG_OBJ_RX)
- pch_can_set_rx_enable(priv, i + 1, DISABLE);
+ if (priv->msg_obj[i] == PCH_MSG_OBJ_RX)
+ pch_can_set_rx_enable(priv, i + 1, PCH_DISABLE);
}
}
@@ -373,22 +361,22 @@ static void pch_can_set_tx_enable(struct pch_can_priv *priv, u32 buff_num,
spin_lock_irqsave(&priv->msgif_reg_lock, flags);
/* Reading the Msg buffer from Message RAM to Interface2 registers. */
- iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if2_cmask);
+ iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->if2_cmask);
pch_can_check_if_busy(&priv->regs->if2_creq, buff_num);
/* Setting the IF2CMASK register for accessing the
MsgVal and TxIE bits */
- iowrite32(CAN_CMASK_RDWR | CAN_CMASK_ARB | CAN_CMASK_CTRL,
+ iowrite32(PCH_CMASK_RDWR | PCH_CMASK_ARB | PCH_CMASK_CTRL,
&priv->regs->if2_cmask);
- if (set == ENABLE) {
+ if (set == PCH_ENABLE) {
/* Setting the MsgVal and TxIE bits */
- pch_can_bit_set(&priv->regs->if2_mcont, CAN_IF_MCONT_TXIE);
- pch_can_bit_set(&priv->regs->if2_id2, CAN_ID_MSGVAL);
- } else if (set == DISABLE) {
+ pch_can_bit_set(&priv->regs->if2_mcont, PCH_IF_MCONT_TXIE);
+ pch_can_bit_set(&priv->regs->if2_id2, PCH_ID_MSGVAL);
+ } else if (set == PCH_DISABLE) {
/* Resetting the MsgVal and TxIE bits. */
- pch_can_bit_clear(&priv->regs->if2_mcont, CAN_IF_MCONT_TXIE);
- pch_can_bit_clear(&priv->regs->if2_id2, CAN_ID_MSGVAL);
+ pch_can_bit_clear(&priv->regs->if2_mcont, PCH_IF_MCONT_TXIE);
+ pch_can_bit_clear(&priv->regs->if2_id2, PCH_ID_MSGVAL);
}
pch_can_check_if_busy(&priv->regs->if2_creq, buff_num);
@@ -401,8 +389,8 @@ static void pch_can_tx_enable_all(struct pch_can_priv *priv)
/* Traversing to obtain the object configured as transmit object. */
for (i = 0; i < PCH_OBJ_NUM; i++) {
- if (priv->msg_obj[i] == MSG_OBJ_TX)
- pch_can_set_tx_enable(priv, i + 1, ENABLE);
+ if (priv->msg_obj[i] == PCH_MSG_OBJ_TX)
+ pch_can_set_tx_enable(priv, i + 1, PCH_ENABLE);
}
}
@@ -412,8 +400,8 @@ static void pch_can_tx_disable_all(struct pch_can_priv *priv)
/* Traversing to obtain the object configured as transmit object. */
for (i = 0; i < PCH_OBJ_NUM; i++) {
- if (priv->msg_obj[i] == MSG_OBJ_TX)
- pch_can_set_tx_enable(priv, i + 1, DISABLE);
+ if (priv->msg_obj[i] == PCH_MSG_OBJ_TX)
+ pch_can_set_tx_enable(priv, i + 1, PCH_DISABLE);
}
}
@@ -423,15 +411,15 @@ static void pch_can_get_rx_enable(struct pch_can_priv *priv, u32 buff_num,
unsigned long flags;
spin_lock_irqsave(&priv->msgif_reg_lock, flags);
- iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if1_cmask);
+ iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->if1_cmask);
pch_can_check_if_busy(&priv->regs->if1_creq, buff_num);
- if (((ioread32(&priv->regs->if1_id2)) & CAN_ID_MSGVAL) &&
+ if (((ioread32(&priv->regs->if1_id2)) & PCH_ID_MSGVAL) &&
((ioread32(&priv->regs->if1_mcont)) &
- CAN_IF_MCONT_RXIE))
- *enable = ENABLE;
+ PCH_IF_MCONT_RXIE))
+ *enable = PCH_ENABLE;
else
- *enable = DISABLE;
+ *enable = PCH_DISABLE;
spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
}
@@ -441,15 +429,15 @@ static void pch_can_get_tx_enable(struct pch_can_priv *priv, u32 buff_num,
unsigned long flags;
spin_lock_irqsave(&priv->msgif_reg_lock, flags);
- iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if2_cmask);
+ iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->if2_cmask);
pch_can_check_if_busy(&priv->regs->if2_creq, buff_num);
- if (((ioread32(&priv->regs->if2_id2)) & CAN_ID_MSGVAL) &&
+ if (((ioread32(&priv->regs->if2_id2)) & PCH_ID_MSGVAL) &&
((ioread32(&priv->regs->if2_mcont)) &
- CAN_IF_MCONT_TXIE)) {
- *enable = ENABLE;
+ PCH_IF_MCONT_TXIE)) {
+ *enable = PCH_ENABLE;
} else {
- *enable = DISABLE;
+ *enable = PCH_DISABLE;
}
spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
}
@@ -465,13 +453,13 @@ static void pch_can_set_rx_buffer_link(struct pch_can_priv *priv,
unsigned long flags;
spin_lock_irqsave(&priv->msgif_reg_lock, flags);
- iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if1_cmask);
+ iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->if1_cmask);
pch_can_check_if_busy(&priv->regs->if1_creq, buffer_num);
- iowrite32(CAN_CMASK_RDWR | CAN_CMASK_CTRL, &priv->regs->if1_cmask);
- if (set == ENABLE)
- pch_can_bit_clear(&priv->regs->if1_mcont, CAN_IF_MCONT_EOB);
+ iowrite32(PCH_CMASK_RDWR | PCH_CMASK_CTRL, &priv->regs->if1_cmask);
+ if (set == PCH_ENABLE)
+ pch_can_bit_clear(&priv->regs->if1_mcont, PCH_IF_MCONT_EOB);
else
- pch_can_bit_set(&priv->regs->if1_mcont, CAN_IF_MCONT_EOB);
+ pch_can_bit_set(&priv->regs->if1_mcont, PCH_IF_MCONT_EOB);
pch_can_check_if_busy(&priv->regs->if1_creq, buffer_num);
spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
@@ -483,13 +471,13 @@ static void pch_can_get_rx_buffer_link(struct pch_can_priv *priv,
unsigned long flags;
spin_lock_irqsave(&priv->msgif_reg_lock, flags);
- iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if1_cmask);
+ iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->if1_cmask);
pch_can_check_if_busy(&priv->regs->if1_creq, buffer_num);
- if (ioread32(&priv->regs->if1_mcont) & CAN_IF_MCONT_EOB)
- *link = DISABLE;
+ if (ioread32(&priv->regs->if1_mcont) & PCH_IF_MCONT_EOB)
+ *link = PCH_DISABLE;
else
- *link = ENABLE;
+ *link = PCH_ENABLE;
spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
}
@@ -498,7 +486,7 @@ static void pch_can_clear_buffers(struct pch_can_priv *priv)
int i;
for (i = 0; i < PCH_RX_OBJ_NUM; i++) {
- iowrite32(CAN_CMASK_RX_TX_SET, &priv->regs->if1_cmask);
+ iowrite32(PCH_CMASK_RX_TX_SET, &priv->regs->if1_cmask);
iowrite32(0xffff, &priv->regs->if1_mask1);
iowrite32(0xffff, &priv->regs->if1_mask2);
iowrite32(0x0, &priv->regs->if1_id1);
@@ -508,14 +496,14 @@ static void pch_can_clear_buffers(struct pch_can_priv *priv)
iowrite32(0x0, &priv->regs->if1_dataa2);
iowrite32(0x0, &priv->regs->if1_datab1);
iowrite32(0x0, &priv->regs->if1_datab2);
- iowrite32(CAN_CMASK_RDWR | CAN_CMASK_MASK |
- CAN_CMASK_ARB | CAN_CMASK_CTRL,
+ iowrite32(PCH_CMASK_RDWR | PCH_CMASK_MASK |
+ PCH_CMASK_ARB | PCH_CMASK_CTRL,
&priv->regs->if1_cmask);
pch_can_check_if_busy(&priv->regs->if1_creq, i+1);
}
for (i = i; i < PCH_OBJ_NUM; i++) {
- iowrite32(CAN_CMASK_RX_TX_SET, &priv->regs->if2_cmask);
+ iowrite32(PCH_CMASK_RX_TX_SET, &priv->regs->if2_cmask);
iowrite32(0xffff, &priv->regs->if2_mask1);
iowrite32(0xffff, &priv->regs->if2_mask2);
iowrite32(0x0, &priv->regs->if2_id1);
@@ -525,8 +513,8 @@ static void pch_can_clear_buffers(struct pch_can_priv *priv)
iowrite32(0x0, &priv->regs->if2_dataa2);
iowrite32(0x0, &priv->regs->if2_datab1);
iowrite32(0x0, &priv->regs->if2_datab2);
- iowrite32(CAN_CMASK_RDWR | CAN_CMASK_MASK |
- CAN_CMASK_ARB | CAN_CMASK_CTRL,
+ iowrite32(PCH_CMASK_RDWR | PCH_CMASK_MASK |
+ PCH_CMASK_ARB | PCH_CMASK_CTRL,
&priv->regs->if2_cmask);
pch_can_check_if_busy(&priv->regs->if2_creq, i+1);
}
@@ -540,8 +528,8 @@ static void pch_can_config_rx_tx_buffers(struct pch_can_priv *priv)
spin_lock_irqsave(&priv->msgif_reg_lock, flags);
for (i = 0; i < PCH_OBJ_NUM; i++) {
- if (priv->msg_obj[i] == MSG_OBJ_RX) {
- iowrite32(CAN_CMASK_RX_TX_GET,
+ if (priv->msg_obj[i] == PCH_MSG_OBJ_RX) {
+ iowrite32(PCH_CMASK_RX_TX_GET,
&priv->regs->if1_cmask);
pch_can_check_if_busy(&priv->regs->if1_creq, i+1);
@@ -549,48 +537,48 @@ static void pch_can_config_rx_tx_buffers(struct pch_can_priv *priv)
iowrite32(0x0, &priv->regs->if1_id2);
pch_can_bit_set(&priv->regs->if1_mcont,
- CAN_IF_MCONT_UMASK);
+ PCH_IF_MCONT_UMASK);
/* Clear EoB (FIFO mode) for all but the last Rx Obj */
pch_can_bit_clear(&priv->regs->if1_mcont,
- CAN_IF_MCONT_EOB);
+ PCH_IF_MCONT_EOB);
/* In FIFO mode, EoB of the last Rx Obj must be 1 */
if (i == (PCH_RX_OBJ_NUM - 1))
pch_can_bit_set(&priv->regs->if1_mcont,
- CAN_IF_MCONT_EOB);
+ PCH_IF_MCONT_EOB);
iowrite32(0, &priv->regs->if1_mask1);
pch_can_bit_clear(&priv->regs->if1_mask2,
- 0x1fff | CAN_MASK2_MDIR_MXTD);
+ 0x1fff | PCH_MASK2_MDIR_MXTD);
/* Setting CMASK for writing */
- iowrite32(CAN_CMASK_RDWR | CAN_CMASK_MASK |
- CAN_CMASK_ARB | CAN_CMASK_CTRL,
+ iowrite32(PCH_CMASK_RDWR | PCH_CMASK_MASK |
+ PCH_CMASK_ARB | PCH_CMASK_CTRL,
&priv->regs->if1_cmask);
pch_can_check_if_busy(&priv->regs->if1_creq, i+1);
- } else if (priv->msg_obj[i] == MSG_OBJ_TX) {
- iowrite32(CAN_CMASK_RX_TX_GET,
+ } else if (priv->msg_obj[i] == PCH_MSG_OBJ_TX) {
+ iowrite32(PCH_CMASK_RX_TX_GET,
&priv->regs->if2_cmask);
pch_can_check_if_busy(&priv->regs->if2_creq, i+1);
/* Resetting DIR bit for reception */
iowrite32(0x0, &priv->regs->if2_id1);
iowrite32(0x0, &priv->regs->if2_id2);
- pch_can_bit_set(&priv->regs->if2_id2, CAN_ID2_DIR);
+ pch_can_bit_set(&priv->regs->if2_id2, PCH_ID2_DIR);
/* Setting EOB bit for transmitter */
- iowrite32(CAN_IF_MCONT_EOB, &priv->regs->if2_mcont);
+ iowrite32(PCH_IF_MCONT_EOB, &priv->regs->if2_mcont);
pch_can_bit_set(&priv->regs->if2_mcont,
- CAN_IF_MCONT_UMASK);
+ PCH_IF_MCONT_UMASK);
iowrite32(0, &priv->regs->if2_mask1);
pch_can_bit_clear(&priv->regs->if2_mask2, 0x1fff);
/* Setting CMASK for writing */
- iowrite32(CAN_CMASK_RDWR | CAN_CMASK_MASK |
- CAN_CMASK_ARB | CAN_CMASK_CTRL,
+ iowrite32(PCH_CMASK_RDWR | PCH_CMASK_MASK |
+ PCH_CMASK_ARB | PCH_CMASK_CTRL,
&priv->regs->if2_cmask);
pch_can_check_if_busy(&priv->regs->if2_creq, i+1);
@@ -632,39 +620,39 @@ static void pch_can_release(struct pch_can_priv *priv)
/* This function clears interrupt(s) from the CAN device. */
static void pch_can_int_clr(struct pch_can_priv *priv, u32 mask)
{
- if (mask == CAN_STATUS_INT) {
+ if (mask == PCH_STATUS_INT) {
ioread32(&priv->regs->stat);
return;
}
/* Clear interrupt for transmit object */
- if (priv->msg_obj[mask - 1] == MSG_OBJ_TX) {
+ if (priv->msg_obj[mask - 1] == PCH_MSG_OBJ_TX) {
/* Setting CMASK for clearing interrupts for
frame transmission. */
- iowrite32(CAN_CMASK_RDWR | CAN_CMASK_CTRL | CAN_CMASK_ARB,
+ iowrite32(PCH_CMASK_RDWR | PCH_CMASK_CTRL | PCH_CMASK_ARB,
&priv->regs->if2_cmask);
/* Resetting the ID registers. */
pch_can_bit_set(&priv->regs->if2_id2,
- CAN_ID2_DIR | (0x7ff << 2));
+ PCH_ID2_DIR | (0x7ff << 2));
iowrite32(0x0, &priv->regs->if2_id1);
/* Clearing NewDat, TxRqst & IntPnd */
pch_can_bit_clear(&priv->regs->if2_mcont,
- CAN_IF_MCONT_NEWDAT | CAN_IF_MCONT_INTPND |
- CAN_IF_MCONT_TXRQXT);
+ PCH_IF_MCONT_NEWDAT | PCH_IF_MCONT_INTPND |
+ PCH_IF_MCONT_TXRQXT);
pch_can_check_if_busy(&priv->regs->if2_creq, mask);
- } else if (priv->msg_obj[mask - 1] == MSG_OBJ_RX) {
+ } else if (priv->msg_obj[mask - 1] == PCH_MSG_OBJ_RX) {
/* Setting CMASK for clearing the reception interrupts. */
- iowrite32(CAN_CMASK_RDWR | CAN_CMASK_CTRL | CAN_CMASK_ARB,
+ iowrite32(PCH_CMASK_RDWR | PCH_CMASK_CTRL | PCH_CMASK_ARB,
&priv->regs->if1_cmask);
/* Clearing the Dir bit. */
- pch_can_bit_clear(&priv->regs->if1_id2, CAN_ID2_DIR);
+ pch_can_bit_clear(&priv->regs->if1_id2, PCH_ID2_DIR);
/* Clearing NewDat & IntPnd */
pch_can_bit_clear(&priv->regs->if1_mcont,
- CAN_IF_MCONT_NEWDAT | CAN_IF_MCONT_INTPND);
+ PCH_IF_MCONT_NEWDAT | PCH_IF_MCONT_INTPND);
pch_can_check_if_busy(&priv->regs->if1_creq, mask);
}
@@ -712,9 +700,9 @@ static void pch_can_error(struct net_device *ndev, u32 status)
priv->can.can_stats.error_warning++;
cf->can_id |= CAN_ERR_CRTL;
errc = ioread32(&priv->regs->errc);
- if (((errc & CAN_REC) >> 8) > 96)
+ if (((errc & PCH_REC) >> 8) > 96)
cf->data[1] |= CAN_ERR_CRTL_RX_WARNING;
- if ((errc & CAN_TEC) > 96)
+ if ((errc & PCH_TEC) > 96)
cf->data[1] |= CAN_ERR_CRTL_TX_WARNING;
dev_warn(&ndev->dev,
"%s -> Error Counter is more than 96.\n", __func__);
@@ -725,9 +713,9 @@ static void pch_can_error(struct net_device *ndev, u32 status)
state = CAN_STATE_ERROR_PASSIVE;
cf->can_id |= CAN_ERR_CRTL;
errc = ioread32(&priv->regs->errc);
- if (((errc & CAN_REC) >> 8) > 127)
+ if (((errc & PCH_REC) >> 8) > 127)
cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
- if ((errc & CAN_TEC) > 127)
+ if ((errc & PCH_TEC) > 127)
cf->data[1] |= CAN_ERR_CRTL_TX_PASSIVE;
dev_err(&ndev->dev,
"%s -> CAN controller is ERROR PASSIVE .\n", __func__);
@@ -795,20 +783,20 @@ static int pch_can_rx_normal(struct net_device *ndev, u32 int_stat)
struct net_device_stats *stats = &(priv->ndev->stats);
/* Reading the message object from the Message RAM */
- iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if1_cmask);
+ iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->if1_cmask);
pch_can_check_if_busy(&priv->regs->if1_creq, int_stat);
/* Reading the MCONT register. */
reg = ioread32(&priv->regs->if1_mcont);
reg &= 0xffff;
- for (k = int_stat; !(reg & CAN_IF_MCONT_EOB); k++) {
+ for (k = int_stat; !(reg & PCH_IF_MCONT_EOB); k++) {
/* If MsgLost bit set. */
- if (reg & CAN_IF_MCONT_MSGLOST) {
+ if (reg & PCH_IF_MCONT_MSGLOST) {
dev_err(&priv->ndev->dev, "Msg Obj is overwritten.\n");
pch_can_bit_clear(&priv->regs->if1_mcont,
- CAN_IF_MCONT_MSGLOST);
- iowrite32(CAN_CMASK_RDWR | CAN_CMASK_CTRL,
+ PCH_IF_MCONT_MSGLOST);
+ iowrite32(PCH_CMASK_RDWR | PCH_CMASK_CTRL,
&priv->regs->if1_cmask);
pch_can_check_if_busy(&priv->regs->if1_creq, k);
@@ -828,7 +816,7 @@ static int pch_can_rx_normal(struct net_device *ndev, u32 int_stat)
rcv_pkts++;
goto RX_NEXT;
}
- if (!(reg & CAN_IF_MCONT_NEWDAT))
+ if (!(reg & PCH_IF_MCONT_NEWDAT))
goto RX_NEXT;
skb = alloc_can_skb(priv->ndev, &cf);
@@ -836,7 +824,7 @@ static int pch_can_rx_normal(struct net_device *ndev, u32 int_stat)
return -ENOMEM;
/* Get Received data */
- ide = ((ioread32(&priv->regs->if1_id2)) & CAN_ID2_XTD) >> 14;
+ ide = ((ioread32(&priv->regs->if1_id2)) & PCH_ID2_XTD) >> 14;
if (ide) {
id = (ioread32(&priv->regs->if1_id1) & 0xffff);
id |= (((ioread32(&priv->regs->if1_id2)) &
@@ -848,7 +836,7 @@ static int pch_can_rx_normal(struct net_device *ndev, u32 int_stat)
cf->can_id = (id & CAN_SFF_MASK);
}
- rtr = (ioread32(&priv->regs->if1_id2) & CAN_ID2_DIR);
+ rtr = (ioread32(&priv->regs->if1_id2) & PCH_ID2_DIR);
if (rtr) {
cf->can_dlc = 0;
cf->can_id |= CAN_RTR_FLAG;
@@ -871,15 +859,15 @@ static int pch_can_rx_normal(struct net_device *ndev, u32 int_stat)
stats->rx_bytes += cf->can_dlc;
if (k < PCH_FIFO_THRESH) {
- iowrite32(CAN_CMASK_RDWR | CAN_CMASK_CTRL |
- CAN_CMASK_ARB, &priv->regs->if1_cmask);
+ iowrite32(PCH_CMASK_RDWR | PCH_CMASK_CTRL |
+ PCH_CMASK_ARB, &priv->regs->if1_cmask);
/* Clearing the Dir bit. */
- pch_can_bit_clear(&priv->regs->if1_id2, CAN_ID2_DIR);
+ pch_can_bit_clear(&priv->regs->if1_id2, PCH_ID2_DIR);
/* Clearing NewDat & IntPnd */
pch_can_bit_clear(&priv->regs->if1_mcont,
- CAN_IF_MCONT_INTPND);
+ PCH_IF_MCONT_INTPND);
pch_can_check_if_busy(&priv->regs->if1_creq, k);
} else if (k > PCH_FIFO_THRESH) {
pch_can_int_clr(priv, k);
@@ -890,7 +878,7 @@ static int pch_can_rx_normal(struct net_device *ndev, u32 int_stat)
}
RX_NEXT:
/* Reading the message object from the Message RAM */
- iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if1_cmask);
+ iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->if1_cmask);
pch_can_check_if_busy(&priv->regs->if1_creq, k + 1);
reg = ioread32(&priv->regs->if1_mcont);
}
@@ -913,7 +901,7 @@ static int pch_can_rx_poll(struct napi_struct *napi, int quota)
return 0;
INT_STAT:
- if (int_stat == CAN_STATUS_INT) {
+ if (int_stat == PCH_STATUS_INT) {
reg_stat = ioread32(&priv->regs->stat);
if (reg_stat & (PCH_BUS_OFF | PCH_LEC_ALL)) {
if ((reg_stat & PCH_LEC_ALL) != PCH_LEC_ALL)
@@ -922,7 +910,7 @@ INT_STAT:
if (reg_stat & PCH_TX_OK) {
spin_lock_irqsave(&priv->msgif_reg_lock, flags);
- iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if2_cmask);
+ iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->if2_cmask);
pch_can_check_if_busy(&priv->regs->if2_creq,
ioread32(&priv->regs->intr));
spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
@@ -933,7 +921,7 @@ INT_STAT:
pch_can_bit_clear(&priv->regs->stat, PCH_RX_OK);
int_stat = pch_can_int_pending(priv);
- if (int_stat == CAN_STATUS_INT)
+ if (int_stat == PCH_STATUS_INT)
goto INT_STAT;
}
@@ -945,14 +933,14 @@ MSG_OBJ:
if (rcv_pkts < 0)
return 0;
} else if ((int_stat > PCH_RX_OBJ_NUM) && (int_stat <= PCH_OBJ_NUM)) {
- if (priv->msg_obj[int_stat - 1] == MSG_OBJ_TX) {
+ if (priv->msg_obj[int_stat - 1] == PCH_MSG_OBJ_TX) {
/* Handle transmission interrupt */
can_get_echo_skb(ndev, int_stat - PCH_RX_OBJ_NUM - 1);
spin_lock_irqsave(&priv->msgif_reg_lock, flags);
- iowrite32(CAN_CMASK_RX_TX_GET | CAN_CMASK_CLRINTPND,
+ iowrite32(PCH_CMASK_RX_TX_GET | PCH_CMASK_CLRINTPND,
&priv->regs->if2_cmask);
dlc = ioread32(&priv->regs->if2_mcont) &
- CAN_IF_MCONT_DLC;
+ PCH_IF_MCONT_DLC;
pch_can_check_if_busy(&priv->regs->if2_creq, int_stat);
spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
if (dlc > 8)
@@ -963,7 +951,7 @@ MSG_OBJ:
}
int_stat = pch_can_int_pending(priv);
- if (int_stat == CAN_STATUS_INT)
+ if (int_stat == PCH_STATUS_INT)
goto INT_STAT;
else if (int_stat >= 1 && int_stat <= 32)
goto MSG_OBJ;
@@ -983,17 +971,17 @@ static int pch_set_bittiming(struct net_device *ndev)
u32 brp;
/* Setting the CCE bit for accessing the Can Timing register. */
- pch_can_bit_set(&priv->regs->cont, CAN_CTRL_CCE);
+ pch_can_bit_set(&priv->regs->cont, PCH_CTRL_CCE);
brp = (bt->tq) / (1000000000/PCH_CAN_CLK) - 1;
- canbit = brp & MSK_BITT_BRP;
- canbit |= (bt->sjw - 1) << BIT_BITT_SJW;
- canbit |= (bt->phase_seg1 + bt->prop_seg - 1) << BIT_BITT_TSEG1;
- canbit |= (bt->phase_seg2 - 1) << BIT_BITT_TSEG2;
- bepe = (brp & MSK_BRPE_BRPE) >> BIT_BRPE_BRPE;
+ canbit = brp & PCH_MSK_BITT_BRP;
+ canbit |= (bt->sjw - 1) << PCH_BIT_SJW;
+ canbit |= (bt->phase_seg1 + bt->prop_seg - 1) << PCH_BIT_TSEG1;
+ canbit |= (bt->phase_seg2 - 1) << PCH_BIT_TSEG2;
+ bepe = (brp & PCH_MSK_BRPE_BRPE) >> PCH_BIT_BRPE_BRPE;
iowrite32(canbit, &priv->regs->bitt);
iowrite32(bepe, &priv->regs->brpe);
- pch_can_bit_clear(&priv->regs->cont, CAN_CTRL_CCE);
+ pch_can_bit_clear(&priv->regs->cont, PCH_CTRL_CCE);
return 0;
}
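Editorial note: the BITT packing above follows the usual C_CAN-style layout. A hedged sketch of the computation; the exact field positions (BRP in bits 5:0, SJW at bit 6, TSEG1 at bit 8, TSEG2 at bit 12, BRPE carrying the upper BRP bits) are stated as assumptions rather than read from the datasheet:

#include <stdint.h>

struct bittiming {
	uint32_t tq;		/* time quantum in ns */
	uint32_t sjw, prop_seg, phase_seg1, phase_seg2;
};

static void pack_bittiming(const struct bittiming *bt, uint32_t can_clk,
			   uint32_t *bitt, uint32_t *brpe)
{
	/* brp = tq / clock-period - 1, exactly as in pch_set_bittiming() */
	uint32_t brp = bt->tq / (1000000000u / can_clk) - 1;

	*bitt  = brp & 0x3f;					/* BRP[5:0] (assumed) */
	*bitt |= (bt->sjw - 1) << 6;				/* SJW (assumed pos) */
	*bitt |= (bt->phase_seg1 + bt->prop_seg - 1) << 8;	/* TSEG1 (assumed) */
	*bitt |= (bt->phase_seg2 - 1) << 12;			/* TSEG2 (assumed) */
	*brpe  = (brp >> 6) & 0xf;				/* BRP extension bits */
}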
@@ -1137,19 +1125,19 @@ static netdev_tx_t pch_xmit(struct sk_buff *skb, struct net_device *ndev)
spin_lock_irqsave(&priv->msgif_reg_lock, flags);
/* Reading the Msg Obj from the Msg RAM to the Interface register. */
- iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if2_cmask);
+ iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->if2_cmask);
pch_can_check_if_busy(&priv->regs->if2_creq, tx_buffer_avail);
/* Setting the CMASK register. */
- pch_can_bit_set(&priv->regs->if2_cmask, CAN_CMASK_ALL);
+ pch_can_bit_set(&priv->regs->if2_cmask, PCH_CMASK_ALL);
/* If ID extended is set. */
pch_can_bit_clear(&priv->regs->if2_id1, 0xffff);
- pch_can_bit_clear(&priv->regs->if2_id2, 0x1fff | CAN_ID2_XTD);
+ pch_can_bit_clear(&priv->regs->if2_id2, 0x1fff | PCH_ID2_XTD);
if (cf->can_id & CAN_EFF_FLAG) {
pch_can_bit_set(&priv->regs->if2_id1, cf->can_id & 0xffff);
pch_can_bit_set(&priv->regs->if2_id2,
- ((cf->can_id >> 16) & 0x1fff) | CAN_ID2_XTD);
+ ((cf->can_id >> 16) & 0x1fff) | PCH_ID2_XTD);
} else {
pch_can_bit_set(&priv->regs->if2_id1, 0);
pch_can_bit_set(&priv->regs->if2_id2,
@@ -1158,7 +1146,7 @@ static netdev_tx_t pch_xmit(struct sk_buff *skb, struct net_device *ndev)
/* If a remote frame has to be transmitted... */
if (cf->can_id & CAN_RTR_FLAG)
- pch_can_bit_clear(&priv->regs->if2_id2, CAN_ID2_DIR);
+ pch_can_bit_clear(&priv->regs->if2_id2, PCH_ID2_DIR);
for (i = 0, j = 0; i < cf->can_dlc; j++) {
iowrite32(le32_to_cpu(cf->data[i++]),
@@ -1177,12 +1165,12 @@ static netdev_tx_t pch_xmit(struct sk_buff *skb, struct net_device *ndev)
/* Clearing IntPnd, NewDat & TxRqst */
pch_can_bit_clear(&priv->regs->if2_mcont,
- CAN_IF_MCONT_NEWDAT | CAN_IF_MCONT_INTPND |
- CAN_IF_MCONT_TXRQXT);
+ PCH_IF_MCONT_NEWDAT | PCH_IF_MCONT_INTPND |
+ PCH_IF_MCONT_TXRQXT);
/* Setting NewDat, TxRqst bits */
pch_can_bit_set(&priv->regs->if2_mcont,
- CAN_IF_MCONT_NEWDAT | CAN_IF_MCONT_TXRQXT);
+ PCH_IF_MCONT_NEWDAT | PCH_IF_MCONT_TXRQXT);
pch_can_check_if_busy(&priv->regs->if2_creq, tx_buffer_avail);
@@ -1245,7 +1233,7 @@ static int pch_can_suspend(struct pci_dev *pdev, pm_message_t state)
/* Save Tx buffer enable state */
for (i = 0; i < PCH_OBJ_NUM; i++) {
- if (priv->msg_obj[i] == MSG_OBJ_TX)
+ if (priv->msg_obj[i] == PCH_MSG_OBJ_TX)
pch_can_get_tx_enable(priv, i + 1,
&(priv->tx_enable[i]));
}
@@ -1255,7 +1243,7 @@ static int pch_can_suspend(struct pci_dev *pdev, pm_message_t state)
/* Save Rx buffer enable state */
for (i = 0; i < PCH_OBJ_NUM; i++) {
- if (priv->msg_obj[i] == MSG_OBJ_RX) {
+ if (priv->msg_obj[i] == PCH_MSG_OBJ_RX) {
pch_can_get_rx_enable(priv, i + 1,
&(priv->rx_enable[i]));
pch_can_get_rx_buffer_link(priv, i + 1,
@@ -1313,7 +1301,7 @@ static int pch_can_resume(struct pci_dev *pdev)
/* Enabling the transmit buffer. */
for (i = 0; i < PCH_OBJ_NUM; i++) {
- if (priv->msg_obj[i] == MSG_OBJ_TX) {
+ if (priv->msg_obj[i] == PCH_MSG_OBJ_TX) {
pch_can_set_tx_enable(priv, i + 1,
priv->tx_enable[i]);
}
@@ -1321,7 +1309,7 @@ static int pch_can_resume(struct pci_dev *pdev)
/* Configuring the receive buffer and enabling them. */
for (i = 0; i < PCH_OBJ_NUM; i++) {
- if (priv->msg_obj[i] == MSG_OBJ_RX) {
+ if (priv->msg_obj[i] == PCH_MSG_OBJ_RX) {
/* Restore buffer link */
pch_can_set_rx_buffer_link(priv, i + 1,
priv->rx_link[i]);
@@ -1349,8 +1337,8 @@ static int pch_can_get_berr_counter(const struct net_device *dev,
{
struct pch_can_priv *priv = netdev_priv(dev);
- bec->txerr = ioread32(&priv->regs->errc) & CAN_TEC;
- bec->rxerr = (ioread32(&priv->regs->errc) & CAN_REC) >> 8;
+ bec->txerr = ioread32(&priv->regs->errc) & PCH_TEC;
+ bec->rxerr = (ioread32(&priv->regs->errc) & PCH_REC) >> 8;
return 0;
}
@@ -1410,10 +1398,10 @@ static int __devinit pch_can_probe(struct pci_dev *pdev,
priv->can.clock.freq = PCH_CAN_CLK; /* Hz */
for (index = 0; index < PCH_RX_OBJ_NUM;)
- priv->msg_obj[index++] = MSG_OBJ_RX;
+ priv->msg_obj[index++] = PCH_MSG_OBJ_RX;
for (index = index; index < PCH_OBJ_NUM;)
- priv->msg_obj[index++] = MSG_OBJ_TX;
+ priv->msg_obj[index++] = PCH_MSG_OBJ_TX;
netif_napi_add(ndev, &priv->napi, pch_can_rx_poll, PCH_RX_OBJ_NUM);
diff --git a/drivers/net/can/sja1000/sja1000_of_platform.c b/drivers/net/can/sja1000/sja1000_of_platform.c
index 5bfccfdf3bbb..09c3e9db9316 100644
--- a/drivers/net/can/sja1000/sja1000_of_platform.c
+++ b/drivers/net/can/sja1000/sja1000_of_platform.c
@@ -107,17 +107,13 @@ static int __devinit sja1000_ofp_probe(struct platform_device *ofdev,
res_size = resource_size(&res);
if (!request_mem_region(res.start, res_size, DRV_NAME)) {
- dev_err(&ofdev->dev, "couldn't request %#llx..%#llx\n",
- (unsigned long long)res.start,
- (unsigned long long)res.end);
+ dev_err(&ofdev->dev, "couldn't request %pR\n", &res);
return -EBUSY;
}
base = ioremap_nocache(res.start, res_size);
if (!base) {
- dev_err(&ofdev->dev, "couldn't ioremap %#llx..%#llx\n",
- (unsigned long long)res.start,
- (unsigned long long)res.end);
+ dev_err(&ofdev->dev, "couldn't ioremap %pR\n", &res);
err = -ENOMEM;
goto exit_release_mem;
}
diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c
index 92bac19ad60a..594ca9c2c10a 100644
--- a/drivers/net/cnic.c
+++ b/drivers/net/cnic.c
@@ -1695,7 +1695,7 @@ static int cnic_bnx2x_iscsi_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
*work = num;
return -EINVAL;
}
- *work = 2 + req2->num_additional_wqes;;
+ *work = 2 + req2->num_additional_wqes;
l5_cid = req1->iscsi_conn_id;
if (l5_cid >= MAX_ISCSI_TBL_SZ)
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index 407d4e272075..046d846c652d 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -3341,7 +3341,6 @@ static int __devinit init_one(struct pci_dev *pdev,
adapter->name = adapter->port[i]->name;
__set_bit(i, &adapter->registered_device_map);
- netif_tx_stop_all_queues(adapter->port[i]);
}
}
if (!adapter->registered_device_map) {
diff --git a/drivers/net/cxgb4/cxgb4_main.c b/drivers/net/cxgb4/cxgb4_main.c
index f17703f410b3..f50bc98310f8 100644
--- a/drivers/net/cxgb4/cxgb4_main.c
+++ b/drivers/net/cxgb4/cxgb4_main.c
@@ -3736,7 +3736,6 @@ static int __devinit init_one(struct pci_dev *pdev,
__set_bit(i, &adapter->registered_device_map);
adapter->chan_map[adap2pinfo(adapter, i)->tx_chan] = i;
- netif_tx_stop_all_queues(adapter->port[i]);
}
}
if (!adapter->registered_device_map) {
diff --git a/drivers/net/cxgb4vf/adapter.h b/drivers/net/cxgb4vf/adapter.h
index 8ea01962e045..4766b4116b41 100644
--- a/drivers/net/cxgb4vf/adapter.h
+++ b/drivers/net/cxgb4vf/adapter.h
@@ -60,7 +60,7 @@ enum {
* MSI-X interrupt index usage.
*/
MSIX_FW = 0, /* MSI-X index for firmware Q */
- MSIX_NIQFLINT = 1, /* MSI-X index base for Ingress Qs */
+ MSIX_IQFLINT = 1, /* MSI-X index base for Ingress Qs */
MSIX_EXTRAS = 1,
MSIX_ENTRIES = MAX_ETH_QSETS + MSIX_EXTRAS,
diff --git a/drivers/net/cxgb4vf/cxgb4vf_main.c b/drivers/net/cxgb4vf/cxgb4vf_main.c
index 555ecc5a2e93..9246d2fa6cf9 100644
--- a/drivers/net/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/cxgb4vf/cxgb4vf_main.c
@@ -280,9 +280,7 @@ static void name_msix_vecs(struct adapter *adapter)
const struct port_info *pi = netdev_priv(dev);
int qs, msi;
- for (qs = 0, msi = MSIX_NIQFLINT;
- qs < pi->nqsets;
- qs++, msi++) {
+ for (qs = 0, msi = MSIX_IQFLINT; qs < pi->nqsets; qs++, msi++) {
snprintf(adapter->msix_info[msi].desc, namelen,
"%s-%d", dev->name, qs);
adapter->msix_info[msi].desc[namelen] = 0;
@@ -309,7 +307,7 @@ static int request_msix_queue_irqs(struct adapter *adapter)
/*
* Ethernet queues.
*/
- msi = MSIX_NIQFLINT;
+ msi = MSIX_IQFLINT;
for_each_ethrxq(s, rxq) {
err = request_irq(adapter->msix_info[msi].vec,
t4vf_sge_intr_msix, 0,
@@ -337,7 +335,7 @@ static void free_msix_queue_irqs(struct adapter *adapter)
int rxq, msi;
free_irq(adapter->msix_info[MSIX_FW].vec, &s->fw_evtq);
- msi = MSIX_NIQFLINT;
+ msi = MSIX_IQFLINT;
for_each_ethrxq(s, rxq)
free_irq(adapter->msix_info[msi++].vec,
&s->ethrxq[rxq].rspq);
@@ -527,7 +525,7 @@ static int setup_sge_queues(struct adapter *adapter)
* brought up at which point lots of things get nailed down
* permanently ...
*/
- msix = MSIX_NIQFLINT;
+ msix = MSIX_IQFLINT;
for_each_port(adapter, pidx) {
struct net_device *dev = adapter->port[pidx];
struct port_info *pi = netdev_priv(dev);
@@ -753,7 +751,9 @@ static int cxgb4vf_open(struct net_device *dev)
if (err)
return err;
set_bit(pi->port_id, &adapter->open_device_map);
- link_start(dev);
+ err = link_start(dev);
+ if (err)
+ return err;
netif_tx_start_all_queues(dev);
return 0;
}
@@ -1103,18 +1103,6 @@ static int cxgb4vf_set_mac_addr(struct net_device *dev, void *_addr)
return 0;
}
-/*
- * Return a TX Queue on which to send the specified skb.
- */
-static u16 cxgb4vf_select_queue(struct net_device *dev, struct sk_buff *skb)
-{
- /*
- * XXX For now just use the default hash but we probably want to
- * XXX look at other possibilities ...
- */
- return skb_tx_hash(dev, skb);
-}
-
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
* Poll all of our receive queues. This is called outside of normal interrupt
@@ -1358,6 +1346,8 @@ struct queue_port_stats {
u64 rx_csum;
u64 vlan_ex;
u64 vlan_ins;
+ u64 lro_pkts;
+ u64 lro_merged;
};
/*
@@ -1395,6 +1385,8 @@ static const char stats_strings[][ETH_GSTRING_LEN] = {
"RxCsumGood ",
"VLANextractions ",
"VLANinsertions ",
+ "GROPackets ",
+ "GROMerged ",
};
/*
@@ -1444,6 +1436,8 @@ static void collect_sge_port_stats(const struct adapter *adapter,
stats->rx_csum += rxq->stats.rx_cso;
stats->vlan_ex += rxq->stats.vlan_ex;
stats->vlan_ins += txq->vlan_ins;
+ stats->lro_pkts += rxq->stats.lro_pkts;
+ stats->lro_merged += rxq->stats.lro_merged;
}
}
@@ -1540,14 +1534,19 @@ static void cxgb4vf_get_wol(struct net_device *dev,
}
/*
+ * TCP Segmentation Offload flags which we support.
+ */
+#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
+
+/*
* Set TCP Segmentation Offloading feature capabilities.
*/
static int cxgb4vf_set_tso(struct net_device *dev, u32 tso)
{
if (tso)
- dev->features |= NETIF_F_TSO | NETIF_F_TSO6;
+ dev->features |= TSO_FLAGS;
else
- dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
+ dev->features &= ~TSO_FLAGS;
return 0;
}
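Editorial note: folding the three TSO feature bits into one mask keeps the enable and disable paths symmetric, and the probe hunk further down reuses the same macro. A minimal sketch of the pattern (MY_TSO_FLAGS and toggle_tso() are illustrative names, not driver symbols):

#include <linux/netdevice.h>

#define MY_TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)

static void toggle_tso(struct net_device *dev, bool enable)
{
	if (enable)
		dev->features |= MY_TSO_FLAGS;	/* one mask, set ... */
	else
		dev->features &= ~MY_TSO_FLAGS;	/* ... and clear in sync */
}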
@@ -2038,7 +2037,7 @@ static int __devinit setup_debugfs(struct adapter *adapter)
* Tear down the /sys/kernel/debug/cxgb4vf sub-nodes created above. We leave
* it to our caller to tear down the directory (debugfs_root).
*/
-static void __devexit cleanup_debugfs(struct adapter *adapter)
+static void cleanup_debugfs(struct adapter *adapter)
{
BUG_ON(adapter->debugfs_root == NULL);
@@ -2056,7 +2055,7 @@ static void __devexit cleanup_debugfs(struct adapter *adapter)
* adapter parameters we're going to be using and initialize basic adapter
* hardware support.
*/
-static int adap_init0(struct adapter *adapter)
+static int __devinit adap_init0(struct adapter *adapter)
{
struct vf_resources *vfres = &adapter->params.vfres;
struct sge_params *sge_params = &adapter->params.sge;
@@ -2075,6 +2074,22 @@ static int adap_init0(struct adapter *adapter)
}
/*
+ * Some environments do not properly handle PCIE FLRs -- e.g. in Linux
+ * 2.6.31 and later we can't call pci_reset_function() in order to
+ * issue an FLR because of a self-deadlock on the device semaphore.
+ * Meanwhile, the OS infrastructure doesn't issue FLRs in all the
+ * cases where they're needed -- for instance, some versions of KVM
+ * fail to reset "Assigned Devices" when the VM reboots. Therefore we
+ * use the firmware based reset in order to reset any per function
+ * state.
+ */
+ err = t4vf_fw_reset(adapter);
+ if (err < 0) {
+ dev_err(adapter->pdev_dev, "FW reset failed: err=%d\n", err);
+ return err;
+ }
+
+ /*
* Grab basic operational parameters. These will predominantly have
* been set up by the Physical Function Driver or will be hard coded
* into the adapter. We just have to live with them ... Note that
@@ -2417,7 +2432,6 @@ static const struct net_device_ops cxgb4vf_netdev_ops = {
.ndo_get_stats = cxgb4vf_get_stats,
.ndo_set_rx_mode = cxgb4vf_set_rxmode,
.ndo_set_mac_address = cxgb4vf_set_mac_addr,
- .ndo_select_queue = cxgb4vf_select_queue,
.ndo_validate_addr = eth_validate_addr,
.ndo_do_ioctl = cxgb4vf_do_ioctl,
.ndo_change_mtu = cxgb4vf_change_mtu,
@@ -2465,7 +2479,6 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev,
version_printed = 1;
}
-
/*
* Initialize generic PCI device state.
*/
@@ -2600,10 +2613,9 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev,
pi->xact_addr_filt = -1;
pi->rx_offload = RX_CSO;
netif_carrier_off(netdev);
- netif_tx_stop_all_queues(netdev);
netdev->irq = pdev->irq;
- netdev->features = (NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
+ netdev->features = (NETIF_F_SG | TSO_FLAGS |
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
NETIF_F_GRO);
@@ -2625,7 +2637,6 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev,
netdev->do_ioctl = cxgb4vf_do_ioctl;
netdev->change_mtu = cxgb4vf_change_mtu;
netdev->set_mac_address = cxgb4vf_set_mac_addr;
- netdev->select_queue = cxgb4vf_select_queue;
#ifdef CONFIG_NET_POLL_CONTROLLER
netdev->poll_controller = cxgb4vf_poll_controller;
#endif
@@ -2844,6 +2855,14 @@ static struct pci_device_id cxgb4vf_pci_tbl[] = {
CH_DEVICE(0x4800, 0), /* T440-dbg */
CH_DEVICE(0x4801, 0), /* T420-cr */
CH_DEVICE(0x4802, 0), /* T422-cr */
+ CH_DEVICE(0x4803, 0), /* T440-cr */
+ CH_DEVICE(0x4804, 0), /* T420-bch */
+ CH_DEVICE(0x4805, 0), /* T440-bch */
+ CH_DEVICE(0x4806, 0), /* T460-ch */
+ CH_DEVICE(0x4807, 0), /* T420-so */
+ CH_DEVICE(0x4808, 0), /* T420-cx */
+ CH_DEVICE(0x4809, 0), /* T420-bt */
+ CH_DEVICE(0x480a, 0), /* T404-bt */
{ 0, }
};
diff --git a/drivers/net/cxgb4vf/sge.c b/drivers/net/cxgb4vf/sge.c
index f10864ddafbe..e0b3d1bc2fdf 100644
--- a/drivers/net/cxgb4vf/sge.c
+++ b/drivers/net/cxgb4vf/sge.c
@@ -154,13 +154,14 @@ enum {
*/
RX_COPY_THRES = 256,
RX_PULL_LEN = 128,
-};
-/*
- * Can't define this in the above enum because PKTSHIFT isn't a constant in
- * the VF Driver ...
- */
-#define RX_PKT_PULL_LEN (RX_PULL_LEN + PKTSHIFT)
+ /*
+ * Main body length for sk_buffs used for RX Ethernet packets with
+ * fragments. Should be >= RX_PULL_LEN but possibly bigger to give
+ * pskb_may_pull() some room.
+ */
+ RX_SKB_LEN = 512,
+};
/*
* Software state per TX descriptor.
@@ -1355,6 +1356,67 @@ out_free:
}
/**
+ * t4vf_pktgl_to_skb - build an sk_buff from a packet gather list
+ * @gl: the gather list
+ * @skb_len: size of sk_buff main body if it carries fragments
+ * @pull_len: amount of data to move to the sk_buff's main body
+ *
+ * Builds an sk_buff from the given packet gather list. Returns the
+ * sk_buff or %NULL if sk_buff allocation failed.
+ */
+struct sk_buff *t4vf_pktgl_to_skb(const struct pkt_gl *gl,
+ unsigned int skb_len, unsigned int pull_len)
+{
+ struct sk_buff *skb;
+ struct skb_shared_info *ssi;
+
+ /*
+ * If the ingress packet is small enough, allocate an skb large enough
+ * for all of the data and copy it inline. Otherwise, allocate an skb
+ * with enough room to pull in the header and reference the rest of
+ * the data via the skb fragment list.
+ *
+ * Below we rely on RX_COPY_THRES being less than the smallest Rx
+ * buff! size, which is expected since buffers are at least
+ * PAGE_SIZEd. In this case packets up to RX_COPY_THRES have only one
+ * fragment.
+ */
+ if (gl->tot_len <= RX_COPY_THRES) {
+ /* small packets have only one fragment */
+ skb = alloc_skb(gl->tot_len, GFP_ATOMIC);
+ if (unlikely(!skb))
+ goto out;
+ __skb_put(skb, gl->tot_len);
+ skb_copy_to_linear_data(skb, gl->va, gl->tot_len);
+ } else {
+ skb = alloc_skb(skb_len, GFP_ATOMIC);
+ if (unlikely(!skb))
+ goto out;
+ __skb_put(skb, pull_len);
+ skb_copy_to_linear_data(skb, gl->va, pull_len);
+
+ ssi = skb_shinfo(skb);
+ ssi->frags[0].page = gl->frags[0].page;
+ ssi->frags[0].page_offset = gl->frags[0].page_offset + pull_len;
+ ssi->frags[0].size = gl->frags[0].size - pull_len;
+ if (gl->nfrags > 1)
+ memcpy(&ssi->frags[1], &gl->frags[1],
+ (gl->nfrags-1) * sizeof(skb_frag_t));
+ ssi->nr_frags = gl->nfrags;
+
+ skb->len = gl->tot_len;
+ skb->data_len = skb->len - pull_len;
+ skb->truesize += skb->data_len;
+
+ /* Get a reference for the last page, we don't own it */
+ get_page(gl->frags[gl->nfrags - 1].page);
+ }
+
+out:
+ return skb;
+}
+
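Editorial note: the consumer side of this helper looks like the excerpt below; it is exactly the shape the t4vf_ethrx_handler() hunk later in this patch adopts, shown here out of context for reference:

	skb = t4vf_pktgl_to_skb(gl, RX_SKB_LEN, RX_PULL_LEN);
	if (unlikely(!skb)) {
		t4vf_pktgl_free(gl);	/* drop the page references */
		rxq->stats.rx_drops++;
		return 0;
	}
	__skb_pull(skb, PKTSHIFT);	/* remove the hardware padding */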
+/**
* t4vf_pktgl_free - free a packet gather list
* @gl: the gather list
*
@@ -1463,10 +1525,8 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
{
struct sk_buff *skb;
struct port_info *pi;
- struct skb_shared_info *ssi;
const struct cpl_rx_pkt *pkt = (void *)&rsp[1];
bool csum_ok = pkt->csum_calc && !pkt->err_vec;
- unsigned int len = be16_to_cpu(pkt->len);
struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
/*
@@ -1481,42 +1541,14 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
}
/*
- * If the ingress packet is small enough, allocate an skb large enough
- * for all of the data and copy it inline. Otherwise, allocate an skb
- * with enough room to pull in the header and reference the rest of
- * the data via the skb fragment list.
+ * Convert the Packet Gather List into an skb.
*/
- if (len <= RX_COPY_THRES) {
- /* small packets have only one fragment */
- skb = alloc_skb(gl->frags[0].size, GFP_ATOMIC);
- if (!skb)
- goto nomem;
- __skb_put(skb, gl->frags[0].size);
- skb_copy_to_linear_data(skb, gl->va, gl->frags[0].size);
- } else {
- skb = alloc_skb(RX_PKT_PULL_LEN, GFP_ATOMIC);
- if (!skb)
- goto nomem;
- __skb_put(skb, RX_PKT_PULL_LEN);
- skb_copy_to_linear_data(skb, gl->va, RX_PKT_PULL_LEN);
-
- ssi = skb_shinfo(skb);
- ssi->frags[0].page = gl->frags[0].page;
- ssi->frags[0].page_offset = (gl->frags[0].page_offset +
- RX_PKT_PULL_LEN);
- ssi->frags[0].size = gl->frags[0].size - RX_PKT_PULL_LEN;
- if (gl->nfrags > 1)
- memcpy(&ssi->frags[1], &gl->frags[1],
- (gl->nfrags-1) * sizeof(skb_frag_t));
- ssi->nr_frags = gl->nfrags;
- skb->len = len + PKTSHIFT;
- skb->data_len = skb->len - RX_PKT_PULL_LEN;
- skb->truesize += skb->data_len;
-
- /* Get a reference for the last page, we don't own it */
- get_page(gl->frags[gl->nfrags - 1].page);
+ skb = t4vf_pktgl_to_skb(gl, RX_SKB_LEN, RX_PULL_LEN);
+ if (unlikely(!skb)) {
+ t4vf_pktgl_free(gl);
+ rxq->stats.rx_drops++;
+ return 0;
}
-
__skb_pull(skb, PKTSHIFT);
skb->protocol = eth_type_trans(skb, rspq->netdev);
skb_record_rx_queue(skb, rspq->idx);
@@ -1536,6 +1568,9 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
} else
skb_checksum_none_assert(skb);
+ /*
+ * Deliver the packet to the stack.
+ */
if (unlikely(pkt->vlan_ex)) {
struct vlan_group *grp = pi->vlan_grp;
@@ -1549,11 +1584,6 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
netif_receive_skb(skb);
return 0;
-
-nomem:
- t4vf_pktgl_free(gl);
- rxq->stats.rx_drops++;
- return 0;
}
/**
@@ -1679,6 +1709,7 @@ int process_responses(struct sge_rspq *rspq, int budget)
}
len = RSPD_LEN(len);
}
+ gl.tot_len = len;
/*
* Gather packet fragments.
@@ -2115,7 +2146,7 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
/*
* Calculate the size of the hardware free list ring plus
- * status page (which the SGE will place at the end of the
+ * Status Page (which the SGE will place after the end of the
* free list ring) in Egress Queue Units.
*/
flsz = (fl->size / FL_PER_EQ_UNIT +
@@ -2212,8 +2243,8 @@ int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq,
struct port_info *pi = netdev_priv(dev);
/*
- * Calculate the size of the hardware TX Queue (including the
- * status age on the end) in units of TX Descriptors.
+ * Calculate the size of the hardware TX Queue (including the Status
+ * Page on the end of the TX Queue) in units of TX Descriptors.
*/
nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc);
diff --git a/drivers/net/cxgb4vf/t4vf_common.h b/drivers/net/cxgb4vf/t4vf_common.h
index 873cb7d86c57..a65c80aed1f2 100644
--- a/drivers/net/cxgb4vf/t4vf_common.h
+++ b/drivers/net/cxgb4vf/t4vf_common.h
@@ -235,6 +235,7 @@ static inline int t4vf_wr_mbox_ns(struct adapter *adapter, const void *cmd,
int __devinit t4vf_wait_dev_ready(struct adapter *);
int __devinit t4vf_port_init(struct adapter *, int);
+int t4vf_fw_reset(struct adapter *);
int t4vf_query_params(struct adapter *, unsigned int, const u32 *, u32 *);
int t4vf_set_params(struct adapter *, unsigned int, const u32 *, const u32 *);
diff --git a/drivers/net/cxgb4vf/t4vf_hw.c b/drivers/net/cxgb4vf/t4vf_hw.c
index ea1c123f0cb4..f7d7f976064b 100644
--- a/drivers/net/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/cxgb4vf/t4vf_hw.c
@@ -326,6 +326,25 @@ int __devinit t4vf_port_init(struct adapter *adapter, int pidx)
}
/**
+ * t4vf_fw_reset - issue a reset to FW
+ * @adapter: the adapter
+ *
+ * Issues a reset command to FW. For a Physical Function this would
+ * result in the Firmware resetting all of its state. For a Virtual
+ * Function this just resets the state associated with the VF.
+ */
+int t4vf_fw_reset(struct adapter *adapter)
+{
+ struct fw_reset_cmd cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_write = cpu_to_be32(FW_CMD_OP(FW_RESET_CMD) |
+ FW_CMD_WRITE);
+ cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
+ return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
+}
+
+/**
* t4vf_query_params - query FW or device parameters
* @adapter: the adapter
* @nparams: the number of parameters
@@ -1257,7 +1276,7 @@ int t4vf_eth_eq_free(struct adapter *adapter, unsigned int eqid)
*/
int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl)
{
- struct fw_cmd_hdr *cmd_hdr = (struct fw_cmd_hdr *)rpl;
+ const struct fw_cmd_hdr *cmd_hdr = (const struct fw_cmd_hdr *)rpl;
u8 opcode = FW_CMD_OP_GET(be32_to_cpu(cmd_hdr->hi));
switch (opcode) {
@@ -1265,7 +1284,8 @@ int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl)
/*
* Link/module state change message.
*/
- const struct fw_port_cmd *port_cmd = (void *)rpl;
+ const struct fw_port_cmd *port_cmd =
+ (const struct fw_port_cmd *)rpl;
u32 word;
int action, port_id, link_ok, speed, fc, pidx;
diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c
index 9f6aeefa06bf..2d4c4fc1d900 100644
--- a/drivers/net/dm9000.c
+++ b/drivers/net/dm9000.c
@@ -1675,7 +1675,7 @@ dm9000_drv_remove(struct platform_device *pdev)
platform_set_drvdata(pdev, NULL);
unregister_netdev(ndev);
- dm9000_release_board(pdev, (board_info_t *) netdev_priv(ndev));
+ dm9000_release_board(pdev, netdev_priv(ndev));
free_netdev(ndev); /* free device structure */
dev_dbg(&pdev->dev, "released and freed device\n");
diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
index 7236f1a53ba0..9333921010cc 100644
--- a/drivers/net/e1000e/82571.c
+++ b/drivers/net/e1000e/82571.c
@@ -74,6 +74,9 @@ static bool e1000_check_mng_mode_82574(struct e1000_hw *hw);
static s32 e1000_led_on_82574(struct e1000_hw *hw);
static void e1000_put_hw_semaphore_82571(struct e1000_hw *hw);
static void e1000_power_down_phy_copper_82571(struct e1000_hw *hw);
+static void e1000_put_hw_semaphore_82573(struct e1000_hw *hw);
+static s32 e1000_get_hw_semaphore_82574(struct e1000_hw *hw);
+static void e1000_put_hw_semaphore_82574(struct e1000_hw *hw);
/**
* e1000_init_phy_params_82571 - Init PHY func ptrs.
@@ -107,6 +110,8 @@ static s32 e1000_init_phy_params_82571(struct e1000_hw *hw)
case e1000_82574:
case e1000_82583:
phy->type = e1000_phy_bm;
+ phy->ops.acquire = e1000_get_hw_semaphore_82574;
+ phy->ops.release = e1000_put_hw_semaphore_82574;
break;
default:
return -E1000_ERR_PHY;
@@ -200,6 +205,17 @@ static s32 e1000_init_nvm_params_82571(struct e1000_hw *hw)
break;
}
+ /* Function Pointers */
+ switch (hw->mac.type) {
+ case e1000_82574:
+ case e1000_82583:
+ nvm->ops.acquire = e1000_get_hw_semaphore_82574;
+ nvm->ops.release = e1000_put_hw_semaphore_82574;
+ break;
+ default:
+ break;
+ }
+
return 0;
}
@@ -542,6 +558,94 @@ static void e1000_put_hw_semaphore_82571(struct e1000_hw *hw)
swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);
ew32(SWSM, swsm);
}
+/**
+ * e1000_get_hw_semaphore_82573 - Acquire hardware semaphore
+ * @hw: pointer to the HW structure
+ *
+ * Acquire the HW semaphore during reset.
+ *
+ **/
+static s32 e1000_get_hw_semaphore_82573(struct e1000_hw *hw)
+{
+ u32 extcnf_ctrl;
+ s32 ret_val = 0;
+ s32 i = 0;
+
+ extcnf_ctrl = er32(EXTCNF_CTRL);
+ extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
+ do {
+ ew32(EXTCNF_CTRL, extcnf_ctrl);
+ extcnf_ctrl = er32(EXTCNF_CTRL);
+
+ if (extcnf_ctrl & E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP)
+ break;
+
+ extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
+
+ msleep(2);
+ i++;
+ } while (i < MDIO_OWNERSHIP_TIMEOUT);
+
+ if (i == MDIO_OWNERSHIP_TIMEOUT) {
+ /* Release semaphores */
+ e1000_put_hw_semaphore_82573(hw);
+ e_dbg("Driver can't access the PHY\n");
+ ret_val = -E1000_ERR_PHY;
+ goto out;
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_put_hw_semaphore_82573 - Release hardware semaphore
+ * @hw: pointer to the HW structure
+ *
+ * Release hardware semaphore used during reset.
+ *
+ **/
+static void e1000_put_hw_semaphore_82573(struct e1000_hw *hw)
+{
+ u32 extcnf_ctrl;
+
+ extcnf_ctrl = er32(EXTCNF_CTRL);
+ extcnf_ctrl &= ~E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
+ ew32(EXTCNF_CTRL, extcnf_ctrl);
+}
+
+static DEFINE_MUTEX(swflag_mutex);
+
+/**
+ * e1000_get_hw_semaphore_82574 - Acquire hardware semaphore
+ * @hw: pointer to the HW structure
+ *
+ * Acquire the HW semaphore to access the PHY or NVM.
+ *
+ **/
+static s32 e1000_get_hw_semaphore_82574(struct e1000_hw *hw)
+{
+ s32 ret_val;
+
+ mutex_lock(&swflag_mutex);
+ ret_val = e1000_get_hw_semaphore_82573(hw);
+ if (ret_val)
+ mutex_unlock(&swflag_mutex);
+ return ret_val;
+}
+
+/**
+ * e1000_put_hw_semaphore_82574 - Release hardware semaphore
+ * @hw: pointer to the HW structure
+ *
+ * Release hardware semaphore used to access the PHY or NVM
+ *
+ **/
+static void e1000_put_hw_semaphore_82574(struct e1000_hw *hw)
+{
+ e1000_put_hw_semaphore_82573(hw);
+ mutex_unlock(&swflag_mutex);
+}
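Editorial note: the 82574/82583 wrappers layer a driver-global mutex over the 82573 EXTCNF_CTRL handshake, so PHY and NVM accesses from different contexts serialize in software before touching the hardware ownership bit. A hedged sketch of the intended pairing; phy_read_raw() is a placeholder for the real MDIC accessor, not an e1000e symbol:

static s32 read_phy_locked(struct e1000_hw *hw, u32 offset, u16 *data)
{
	s32 ret_val = hw->phy.ops.acquire(hw);	/* mutex + HW handshake */

	if (ret_val)
		return ret_val;
	ret_val = phy_read_raw(hw, offset, data);	/* placeholder */
	hw->phy.ops.release(hw);		/* release on every path */
	return ret_val;
}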
/**
* e1000_acquire_nvm_82571 - Request for access to the EEPROM
@@ -562,8 +666,6 @@ static s32 e1000_acquire_nvm_82571(struct e1000_hw *hw)
switch (hw->mac.type) {
case e1000_82573:
- case e1000_82574:
- case e1000_82583:
break;
default:
ret_val = e1000e_acquire_nvm(hw);
@@ -853,9 +955,8 @@ static s32 e1000_set_d0_lplu_state_82571(struct e1000_hw *hw, bool active)
**/
static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
{
- u32 ctrl, extcnf_ctrl, ctrl_ext, icr;
+ u32 ctrl, ctrl_ext, icr;
s32 ret_val;
- u16 i = 0;
/*
* Prevent the PCI-E bus from sticking if there is no TLP connection
@@ -880,33 +981,33 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
*/
switch (hw->mac.type) {
case e1000_82573:
+ ret_val = e1000_get_hw_semaphore_82573(hw);
+ break;
case e1000_82574:
case e1000_82583:
- extcnf_ctrl = er32(EXTCNF_CTRL);
- extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
-
- do {
- ew32(EXTCNF_CTRL, extcnf_ctrl);
- extcnf_ctrl = er32(EXTCNF_CTRL);
-
- if (extcnf_ctrl & E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP)
- break;
-
- extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
-
- msleep(2);
- i++;
- } while (i < MDIO_OWNERSHIP_TIMEOUT);
+ ret_val = e1000_get_hw_semaphore_82574(hw);
break;
default:
break;
}
+ if (ret_val)
+ e_dbg("Cannot acquire MDIO ownership\n");
ctrl = er32(CTRL);
e_dbg("Issuing a global reset to MAC\n");
ew32(CTRL, ctrl | E1000_CTRL_RST);
+ /* Must release MDIO ownership and mutex after MAC reset. */
+ switch (hw->mac.type) {
+ case e1000_82574:
+ case e1000_82583:
+ e1000_put_hw_semaphore_82574(hw);
+ break;
+ default:
+ break;
+ }
+
if (hw->nvm.type == e1000_nvm_flash_hw) {
udelay(10);
ctrl_ext = er32(CTRL_EXT);
@@ -1431,8 +1532,10 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
* auto-negotiation in the TXCW register and disable
* forced link in the Device Control register in an
* attempt to auto-negotiate with our link partner.
+ * If the partner code word is null, stop forcing
+ * and restart auto negotiation.
*/
- if (rxcw & E1000_RXCW_C) {
+ if ((rxcw & E1000_RXCW_C) || !(rxcw & E1000_RXCW_CW)) {
/* Enable autoneg, and unforce link up */
ew32(TXCW, mac->txcw);
ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
diff --git a/drivers/net/e1000e/defines.h b/drivers/net/e1000e/defines.h
index d3f7a9c3f973..016ea383145a 100644
--- a/drivers/net/e1000e/defines.h
+++ b/drivers/net/e1000e/defines.h
@@ -516,6 +516,7 @@
#define E1000_TXCW_ANE 0x80000000 /* Auto-neg enable */
/* Receive Configuration Word */
+#define E1000_RXCW_CW 0x0000ffff /* RxConfigWord mask */
#define E1000_RXCW_IV 0x08000000 /* Receive config invalid */
#define E1000_RXCW_C 0x20000000 /* Receive config */
#define E1000_RXCW_SYNCH 0x40000000 /* Receive config synch */
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index c4ca1629f532..9b3f0a996b00 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -4595,7 +4595,7 @@ dma_error:
i += tx_ring->count;
i--;
buffer_info = &tx_ring->buffer_info[i];
- e1000_put_txbuf(adapter, buffer_info);;
+ e1000_put_txbuf(adapter, buffer_info);
}
return 0;
@@ -5465,6 +5465,36 @@ static void e1000_shutdown(struct pci_dev *pdev)
}
#ifdef CONFIG_NET_POLL_CONTROLLER
+
+static irqreturn_t e1000_intr_msix(int irq, void *data)
+{
+ struct net_device *netdev = data;
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ int vector, msix_irq;
+
+ if (adapter->msix_entries) {
+ vector = 0;
+ msix_irq = adapter->msix_entries[vector].vector;
+ disable_irq(msix_irq);
+ e1000_intr_msix_rx(msix_irq, netdev);
+ enable_irq(msix_irq);
+
+ vector++;
+ msix_irq = adapter->msix_entries[vector].vector;
+ disable_irq(msix_irq);
+ e1000_intr_msix_tx(msix_irq, netdev);
+ enable_irq(msix_irq);
+
+ vector++;
+ msix_irq = adapter->msix_entries[vector].vector;
+ disable_irq(msix_irq);
+ e1000_msix_other(msix_irq, netdev);
+ enable_irq(msix_irq);
+ }
+
+ return IRQ_HANDLED;
+}
+
/*
* Polling 'interrupt' - used by things like netconsole to send skbs
* without having to re-enable interrupts. It's not called while
@@ -5474,10 +5504,21 @@ static void e1000_netpoll(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
- disable_irq(adapter->pdev->irq);
- e1000_intr(adapter->pdev->irq, netdev);
-
- enable_irq(adapter->pdev->irq);
+ switch (adapter->int_mode) {
+ case E1000E_INT_MODE_MSIX:
+ e1000_intr_msix(adapter->pdev->irq, netdev);
+ break;
+ case E1000E_INT_MODE_MSI:
+ disable_irq(adapter->pdev->irq);
+ e1000_intr_msi(adapter->pdev->irq, netdev);
+ enable_irq(adapter->pdev->irq);
+ break;
+ default: /* E1000E_INT_MODE_LEGACY */
+ disable_irq(adapter->pdev->irq);
+ e1000_intr(adapter->pdev->irq, netdev);
+ enable_irq(adapter->pdev->irq);
+ break;
+ }
}
#endif
diff --git a/drivers/net/eepro.c b/drivers/net/eepro.c
index 7c826319ee5a..9e19fbc2f176 100644
--- a/drivers/net/eepro.c
+++ b/drivers/net/eepro.c
@@ -302,7 +302,7 @@ struct eepro_local {
#define ee_id_eepro10p0 0x10 /* ID for eepro/10+ */
#define ee_id_eepro10p1 0x31
-#define TX_TIMEOUT 40
+#define TX_TIMEOUT ((4*HZ)/10)
/* Index to functions, as function prototypes. */
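Editorial note: watchdog timeouts are measured in jiffies, so the old bare 40 silently meant 40 ticks and scaled with the kernel's HZ setting; (4*HZ)/10 pins it to 400 ms everywhere. A hedged one-function illustration of the equivalent explicit form:

#include <linux/jiffies.h>
#include <linux/netdevice.h>

static void set_tx_timeout(struct net_device *dev)
{
	/* same intent as ((4*HZ)/10), HZ-independent and explicit */
	dev->watchdog_timeo = msecs_to_jiffies(400);
}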
diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h
index c91d364c5527..70672541364e 100644
--- a/drivers/net/enic/enic.h
+++ b/drivers/net/enic/enic.h
@@ -32,7 +32,7 @@
#define DRV_NAME "enic"
#define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver"
-#define DRV_VERSION "1.4.1.6"
+#define DRV_VERSION "1.4.1.7"
#define DRV_COPYRIGHT "Copyright 2008-2010 Cisco Systems, Inc"
#define ENIC_BARS_MAX 6
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
index a466ef91dd43..9f293fa24768 100644
--- a/drivers/net/enic/enic_main.c
+++ b/drivers/net/enic/enic_main.c
@@ -2042,7 +2042,7 @@ static int enic_dev_hang_reset(struct enic *enic)
static int enic_set_rsskey(struct enic *enic)
{
- u64 rss_key_buf_pa;
+ dma_addr_t rss_key_buf_pa;
union vnic_rss_key *rss_key_buf_va = NULL;
union vnic_rss_key rss_key = {
.key[0].b = {85, 67, 83, 97, 119, 101, 115, 111, 109, 101},
@@ -2073,7 +2073,7 @@ static int enic_set_rsskey(struct enic *enic)
static int enic_set_rsscpu(struct enic *enic, u8 rss_hash_bits)
{
- u64 rss_cpu_buf_pa;
+ dma_addr_t rss_cpu_buf_pa;
union vnic_rss_cpu *rss_cpu_buf_va = NULL;
unsigned int i;
int err;
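Editorial note: the type change matters because dma_addr_t is 32 bits on many platforms while u64 never is, and the DMA API writes the handle back through the pointer it is given -- a u64* would be the wrong size there. A hedged sketch of the corrected pattern; KEY_BUF_LEN stands in for sizeof(union vnic_rss_key):

#include <linux/pci.h>

#define KEY_BUF_LEN 64	/* illustrative; the real code uses the union size */

static int alloc_key_buf(struct pci_dev *pdev, void **va, dma_addr_t *pa)
{
	/* pa is dma_addr_t, never plain u64 */
	*va = pci_alloc_consistent(pdev, KEY_BUF_LEN, pa);
	return *va ? 0 : -ENOMEM;
}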
diff --git a/drivers/net/fec_mpc52xx.c b/drivers/net/fec_mpc52xx.c
index e9f5d030bc26..50c1213f61fe 100644
--- a/drivers/net/fec_mpc52xx.c
+++ b/drivers/net/fec_mpc52xx.c
@@ -366,9 +366,8 @@ static irqreturn_t mpc52xx_fec_tx_interrupt(int irq, void *dev_id)
{
struct net_device *dev = dev_id;
struct mpc52xx_fec_priv *priv = netdev_priv(dev);
- unsigned long flags;
- spin_lock_irqsave(&priv->lock, flags);
+ spin_lock(&priv->lock);
while (bcom_buffer_done(priv->tx_dmatsk)) {
struct sk_buff *skb;
struct bcom_fec_bd *bd;
@@ -379,7 +378,7 @@ static irqreturn_t mpc52xx_fec_tx_interrupt(int irq, void *dev_id)
dev_kfree_skb_irq(skb);
}
- spin_unlock_irqrestore(&priv->lock, flags);
+ spin_unlock(&priv->lock);
netif_wake_queue(dev);
@@ -395,9 +394,8 @@ static irqreturn_t mpc52xx_fec_rx_interrupt(int irq, void *dev_id)
struct bcom_fec_bd *bd;
u32 status, physaddr;
int length;
- unsigned long flags;
- spin_lock_irqsave(&priv->lock, flags);
+ spin_lock(&priv->lock);
while (bcom_buffer_done(priv->rx_dmatsk)) {
@@ -429,7 +427,7 @@ static irqreturn_t mpc52xx_fec_rx_interrupt(int irq, void *dev_id)
/* Process the received skb - Drop the spin lock while
* calling into the network stack */
- spin_unlock_irqrestore(&priv->lock, flags);
+ spin_unlock(&priv->lock);
dma_unmap_single(dev->dev.parent, physaddr, rskb->len,
DMA_FROM_DEVICE);
@@ -438,10 +436,10 @@ static irqreturn_t mpc52xx_fec_rx_interrupt(int irq, void *dev_id)
rskb->protocol = eth_type_trans(rskb, dev);
netif_rx(rskb);
- spin_lock_irqsave(&priv->lock, flags);
+ spin_lock(&priv->lock);
}
- spin_unlock_irqrestore(&priv->lock, flags);
+ spin_unlock(&priv->lock);
return IRQ_HANDLED;
}
@@ -452,7 +450,6 @@ static irqreturn_t mpc52xx_fec_interrupt(int irq, void *dev_id)
struct mpc52xx_fec_priv *priv = netdev_priv(dev);
struct mpc52xx_fec __iomem *fec = priv->fec;
u32 ievent;
- unsigned long flags;
ievent = in_be32(&fec->ievent);
@@ -470,9 +467,9 @@ static irqreturn_t mpc52xx_fec_interrupt(int irq, void *dev_id)
if (net_ratelimit() && (ievent & FEC_IEVENT_XFIFO_ERROR))
dev_warn(&dev->dev, "FEC_IEVENT_XFIFO_ERROR\n");
- spin_lock_irqsave(&priv->lock, flags);
+ spin_lock(&priv->lock);
mpc52xx_fec_reset(dev);
- spin_unlock_irqrestore(&priv->lock, flags);
+ spin_unlock(&priv->lock);
return IRQ_HANDLED;
}
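Editorial note: the simplification above relies on a standard locking rule -- a hardirq handler already runs with local interrupts disabled, so a plain spin_lock() suffices there, while any process-context user of the same lock must still use the _irqsave variant. A minimal sketch of the handler side (struct my_priv and my_irq() are illustrative):

#include <linux/interrupt.h>
#include <linux/spinlock.h>

struct my_priv {
	spinlock_t lock;
	/* ... state shared with process context ... */
};

static irqreturn_t my_irq(int irq, void *dev_id)
{
	struct my_priv *priv = dev_id;

	spin_lock(&priv->lock);		/* hardirq: no irqsave needed */
	/* ... consume completed buffers ... */
	spin_unlock(&priv->lock);
	return IRQ_HANDLED;
}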
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 49e4ce1246a7..d1bec6269173 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -577,11 +577,10 @@ static int gfar_parse_group(struct device_node *np,
irq_of_parse_and_map(np, 1);
priv->gfargrp[priv->num_grps].interruptError =
irq_of_parse_and_map(np,2);
- if (priv->gfargrp[priv->num_grps].interruptTransmit < 0 ||
- priv->gfargrp[priv->num_grps].interruptReceive < 0 ||
- priv->gfargrp[priv->num_grps].interruptError < 0) {
+ if (priv->gfargrp[priv->num_grps].interruptTransmit == NO_IRQ ||
+ priv->gfargrp[priv->num_grps].interruptReceive == NO_IRQ ||
+ priv->gfargrp[priv->num_grps].interruptError == NO_IRQ)
return -EINVAL;
- }
}
priv->gfargrp[priv->num_grps].grp_id = priv->num_grps;
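Editorial note: the fix works because irq_of_parse_and_map() returns an unsigned virq, with NO_IRQ (0 on the platforms this driver runs on) signalling failure, so the old signed "< 0" tests could never fire. The minimal correct shape, sketched:

#include <linux/of_irq.h>

unsigned int virq = irq_of_parse_and_map(np, 0);

if (virq == NO_IRQ)	/* mapping failed; never test "< 0" */
	return -EINVAL;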
diff --git a/drivers/net/gianfar_ethtool.c b/drivers/net/gianfar_ethtool.c
index 5c566ebc54b8..3bc8e276ba4d 100644
--- a/drivers/net/gianfar_ethtool.c
+++ b/drivers/net/gianfar_ethtool.c
@@ -635,9 +635,10 @@ static int gfar_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
if (wol->wolopts & ~WAKE_MAGIC)
return -EINVAL;
+ device_set_wakeup_enable(&dev->dev, wol->wolopts & WAKE_MAGIC);
+
spin_lock_irqsave(&priv->bflock, flags);
- priv->wol_en = wol->wolopts & WAKE_MAGIC ? 1 : 0;
- device_set_wakeup_enable(&dev->dev, priv->wol_en);
+ priv->wol_en = !!device_may_wakeup(&dev->dev);
spin_unlock_irqrestore(&priv->bflock, flags);
return 0;
diff --git a/drivers/net/ibm_newemac/core.c b/drivers/net/ibm_newemac/core.c
index 385dc3204cb7..06bb9b799458 100644
--- a/drivers/net/ibm_newemac/core.c
+++ b/drivers/net/ibm_newemac/core.c
@@ -2871,7 +2871,6 @@ static int __devinit emac_probe(struct platform_device *ofdev,
SET_ETHTOOL_OPS(ndev, &emac_ethtool_ops);
netif_carrier_off(ndev);
- netif_stop_queue(ndev);
err = register_netdev(ndev);
if (err) {
diff --git a/drivers/net/igbvf/Makefile b/drivers/net/igbvf/Makefile
index c2f150d8f2d9..0fa3db3dd8b6 100644
--- a/drivers/net/igbvf/Makefile
+++ b/drivers/net/igbvf/Makefile
@@ -1,7 +1,7 @@
################################################################################
#
# Intel(R) 82576 Virtual Function Linux driver
-# Copyright(c) 2009 Intel Corporation.
+# Copyright(c) 2009 - 2010 Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/igbvf/defines.h b/drivers/net/igbvf/defines.h
index 88a47537518a..79f2604673fe 100644
--- a/drivers/net/igbvf/defines.h
+++ b/drivers/net/igbvf/defines.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) 82576 Virtual Function Linux driver
- Copyright(c) 1999 - 2009 Intel Corporation.
+ Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/igbvf/ethtool.c b/drivers/net/igbvf/ethtool.c
index 33add708bcbe..abb3606928fb 100644
--- a/drivers/net/igbvf/ethtool.c
+++ b/drivers/net/igbvf/ethtool.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) 82576 Virtual Function Linux driver
- Copyright(c) 2009 Intel Corporation.
+ Copyright(c) 2009 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/igbvf/igbvf.h b/drivers/net/igbvf/igbvf.h
index debeee2dc717..9d4d63e536d4 100644
--- a/drivers/net/igbvf/igbvf.h
+++ b/drivers/net/igbvf/igbvf.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) 82576 Virtual Function Linux driver
- Copyright(c) 2009 Intel Corporation.
+ Copyright(c) 2009 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -126,7 +126,6 @@ struct igbvf_buffer {
unsigned int page_offset;
};
};
- struct page *page;
};
union igbvf_desc {
diff --git a/drivers/net/igbvf/mbx.c b/drivers/net/igbvf/mbx.c
index 819a8ec901dc..3d6f4cc3998a 100644
--- a/drivers/net/igbvf/mbx.c
+++ b/drivers/net/igbvf/mbx.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) 82576 Virtual Function Linux driver
- Copyright(c) 2009 Intel Corporation.
+ Copyright(c) 2009 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/igbvf/mbx.h b/drivers/net/igbvf/mbx.h
index 4938609dbfb5..c2883c45d477 100644
--- a/drivers/net/igbvf/mbx.h
+++ b/drivers/net/igbvf/mbx.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) 82576 Virtual Function Linux driver
- Copyright(c) 1999 - 2009 Intel Corporation.
+ Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/igbvf/netdev.c b/drivers/net/igbvf/netdev.c
index 28af019c97bb..4c998b7726da 100644
--- a/drivers/net/igbvf/netdev.c
+++ b/drivers/net/igbvf/netdev.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) 82576 Virtual Function Linux driver
- Copyright(c) 2009 Intel Corporation.
+ Copyright(c) 2009 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -44,12 +44,13 @@
#include "igbvf.h"
-#define DRV_VERSION "1.0.0-k0"
+#define DRV_VERSION "1.0.8-k0"
char igbvf_driver_name[] = "igbvf";
const char igbvf_driver_version[] = DRV_VERSION;
static const char igbvf_driver_string[] =
"Intel(R) Virtual Function Network Driver";
-static const char igbvf_copyright[] = "Copyright (c) 2009 Intel Corporation.";
+static const char igbvf_copyright[] =
+ "Copyright (c) 2009 - 2010 Intel Corporation.";
static int igbvf_poll(struct napi_struct *napi, int budget);
static void igbvf_reset(struct igbvf_adapter *);
@@ -1851,8 +1852,6 @@ static void igbvf_watchdog_task(struct work_struct *work)
if (link) {
if (!netif_carrier_ok(netdev)) {
- bool txb2b = 1;
-
mac->ops.get_link_up_info(&adapter->hw,
&adapter->link_speed,
&adapter->link_duplex);
@@ -1862,11 +1861,9 @@ static void igbvf_watchdog_task(struct work_struct *work)
adapter->tx_timeout_factor = 1;
switch (adapter->link_speed) {
case SPEED_10:
- txb2b = 0;
adapter->tx_timeout_factor = 16;
break;
case SPEED_100:
- txb2b = 0;
/* maybe add some timeout factor? */
break;
}
diff --git a/drivers/net/igbvf/regs.h b/drivers/net/igbvf/regs.h
index b9e24ed70d0a..77e18d3d6b15 100644
--- a/drivers/net/igbvf/regs.h
+++ b/drivers/net/igbvf/regs.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) 82576 Virtual Function Linux driver
- Copyright(c) 2009 Intel Corporation.
+ Copyright(c) 2009 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/igbvf/vf.c b/drivers/net/igbvf/vf.c
index a9a61efa964c..0cc13c6ed418 100644
--- a/drivers/net/igbvf/vf.c
+++ b/drivers/net/igbvf/vf.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) 82576 Virtual Function Linux driver
- Copyright(c) 2009 Intel Corporation.
+ Copyright(c) 2009 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/igbvf/vf.h b/drivers/net/igbvf/vf.h
index 1e8ce3741a67..c36ea21f17fa 100644
--- a/drivers/net/igbvf/vf.h
+++ b/drivers/net/igbvf/vf.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) 82576 Virtual Function Linux driver
- Copyright(c) 2009 Intel Corporation.
+ Copyright(c) 2009 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ipg.c b/drivers/net/ipg.c
index dc0198092343..aa93655c3aa7 100644
--- a/drivers/net/ipg.c
+++ b/drivers/net/ipg.c
@@ -88,16 +88,14 @@ static const char *ipg_brand_name[] = {
"IC PLUS IP1000 1000/100/10 based NIC",
"Sundance Technology ST2021 based NIC",
"Tamarack Microelectronics TC9020/9021 based NIC",
- "Tamarack Microelectronics TC9020/9021 based NIC",
"D-Link NIC IP1000A"
};
static DEFINE_PCI_DEVICE_TABLE(ipg_pci_tbl) = {
{ PCI_VDEVICE(SUNDANCE, 0x1023), 0 },
{ PCI_VDEVICE(SUNDANCE, 0x2021), 1 },
- { PCI_VDEVICE(SUNDANCE, 0x1021), 2 },
- { PCI_VDEVICE(DLINK, 0x9021), 3 },
- { PCI_VDEVICE(DLINK, 0x4020), 4 },
+ { PCI_VDEVICE(DLINK, 0x9021), 2 },
+ { PCI_VDEVICE(DLINK, 0x4020), 3 },
{ 0, }
};
diff --git a/drivers/net/iseries_veth.c b/drivers/net/iseries_veth.c
index 8df645e78f2e..38e15be6d513 100644
--- a/drivers/net/iseries_veth.c
+++ b/drivers/net/iseries_veth.c
@@ -1605,7 +1605,7 @@ static int veth_probe(struct vio_dev *vdev, const struct vio_device_id *id)
}
veth_dev[i] = dev;
- port = (struct veth_port*)netdev_priv(dev);
+ port = netdev_priv(dev);
/* Start the state machine on each connection on this vlan. If we're
* the first dev to do so this will commence link negotiation */
diff --git a/drivers/net/ixgbe/Makefile b/drivers/net/ixgbe/Makefile
index 8f81efb49169..7d7387fbdecd 100644
--- a/drivers/net/ixgbe/Makefile
+++ b/drivers/net/ixgbe/Makefile
@@ -34,7 +34,7 @@ obj-$(CONFIG_IXGBE) += ixgbe.o
ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \
ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \
- ixgbe_mbx.o
+ ixgbe_mbx.o ixgbe_x540.o
ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o \
ixgbe_dcb_82599.o ixgbe_dcb_nl.o
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index ed8703cfffb7..3ae30b8cb7d6 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -61,10 +61,8 @@
#define IXGBE_MIN_RXD 64
/* flow control */
-#define IXGBE_DEFAULT_FCRTL 0x10000
#define IXGBE_MIN_FCRTL 0x40
#define IXGBE_MAX_FCRTL 0x7FF80
-#define IXGBE_DEFAULT_FCRTH 0x20000
#define IXGBE_MIN_FCRTH 0x600
#define IXGBE_MAX_FCRTH 0x7FFF0
#define IXGBE_DEFAULT_FCPAUSE 0xFFFF
@@ -130,7 +128,9 @@ struct ixgbe_tx_buffer {
unsigned long time_stamp;
u16 length;
u16 next_to_watch;
- u16 mapped_as_page;
+ unsigned int bytecount;
+ u16 gso_segs;
+ u8 mapped_as_page;
};
struct ixgbe_rx_buffer {
@@ -146,12 +146,56 @@ struct ixgbe_queue_stats {
u64 bytes;
};
+struct ixgbe_tx_queue_stats {
+ u64 restart_queue;
+ u64 tx_busy;
+ u64 completed;
+ u64 tx_done_old;
+};
+
+struct ixgbe_rx_queue_stats {
+ u64 rsc_count;
+ u64 rsc_flush;
+ u64 non_eop_descs;
+ u64 alloc_rx_page_failed;
+ u64 alloc_rx_buff_failed;
+};
+
+enum ixgbe_ring_state_t {
+ __IXGBE_TX_FDIR_INIT_DONE,
+ __IXGBE_TX_DETECT_HANG,
+ __IXGBE_HANG_CHECK_ARMED,
+ __IXGBE_RX_PS_ENABLED,
+ __IXGBE_RX_RSC_ENABLED,
+};
+
+#define ring_is_ps_enabled(ring) \
+ test_bit(__IXGBE_RX_PS_ENABLED, &(ring)->state)
+#define set_ring_ps_enabled(ring) \
+ set_bit(__IXGBE_RX_PS_ENABLED, &(ring)->state)
+#define clear_ring_ps_enabled(ring) \
+ clear_bit(__IXGBE_RX_PS_ENABLED, &(ring)->state)
+#define check_for_tx_hang(ring) \
+ test_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
+#define set_check_for_tx_hang(ring) \
+ set_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
+#define clear_check_for_tx_hang(ring) \
+ clear_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
+#define ring_is_rsc_enabled(ring) \
+ test_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
+#define set_ring_rsc_enabled(ring) \
+ set_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
+#define clear_ring_rsc_enabled(ring) \
+ clear_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
struct ixgbe_ring {
void *desc; /* descriptor ring memory */
+ struct device *dev; /* device for DMA mapping */
+ struct net_device *netdev; /* netdev ring belongs to */
union {
struct ixgbe_tx_buffer *tx_buffer_info;
struct ixgbe_rx_buffer *rx_buffer_info;
};
+ unsigned long state;
u8 atr_sample_rate;
u8 atr_count;
u16 count; /* amount of descriptors */
@@ -160,38 +204,30 @@ struct ixgbe_ring {
u16 next_to_clean;
u8 queue_index; /* needed for multiqueue queue management */
-
-#define IXGBE_RING_RX_PS_ENABLED (u8)(1)
- u8 flags; /* per ring feature flags */
- u16 head;
- u16 tail;
-
- unsigned int total_bytes;
- unsigned int total_packets;
-
-#ifdef CONFIG_IXGBE_DCA
- /* cpu for tx queue */
- int cpu;
-#endif
-
- u16 work_limit; /* max work per interrupt */
- u16 reg_idx; /* holds the special value that gets
+ u8 reg_idx; /* holds the special value that gets
* the hardware register offset
* associated with this ring, which is
* different for DCB and RSS modes
*/
+ u16 work_limit; /* max work per interrupt */
+
+ u8 __iomem *tail;
+
+ unsigned int total_bytes;
+ unsigned int total_packets;
+
struct ixgbe_queue_stats stats;
struct u64_stats_sync syncp;
+ union {
+ struct ixgbe_tx_queue_stats tx_stats;
+ struct ixgbe_rx_queue_stats rx_stats;
+ };
int numa_node;
- unsigned long reinit_state;
- u64 rsc_count; /* stat for coalesced packets */
- u64 rsc_flush; /* stats for flushed packets */
- u32 restart_queue; /* track tx queue restarts */
- u32 non_eop_descs; /* track hardware descriptor chaining */
-
unsigned int size; /* length in bytes */
dma_addr_t dma; /* phys. address of descriptor ring */
+ struct rcu_head rcu;
+ struct ixgbe_q_vector *q_vector; /* back-pointer to host q_vector */
} ____cacheline_internodealigned_in_smp;
enum ixgbe_ring_f_enum {
@@ -237,6 +273,9 @@ struct ixgbe_q_vector {
unsigned int v_idx; /* index of q_vector within array, also used for
* finding the bit in EICR and friends that
* represents the vector for this ring */
+#ifdef CONFIG_IXGBE_DCA
+ int cpu; /* CPU for DCA */
+#endif
struct napi_struct napi;
DECLARE_BITMAP(rxr_idx, MAX_RX_QUEUES); /* Rx ring indices */
DECLARE_BITMAP(txr_idx, MAX_TX_QUEUES); /* Tx ring indices */
@@ -246,6 +285,7 @@ struct ixgbe_q_vector {
u8 rx_itr;
u32 eitr;
cpumask_var_t affinity_mask;
+ char name[IFNAMSIZ + 9];
};
/* Helper macros to switch between ints/sec and what the register uses.
@@ -294,7 +334,6 @@ struct ixgbe_adapter {
u16 bd_number;
struct work_struct reset_task;
struct ixgbe_q_vector *q_vector[MAX_MSIX_Q_VECTORS];
- char name[MAX_MSIX_COUNT][IFNAMSIZ + 9];
struct ixgbe_dcb_config dcb_cfg;
struct ixgbe_dcb_config temp_dcb_cfg;
u8 dcb_set_bitmap;
@@ -417,6 +456,7 @@ struct ixgbe_adapter {
int node;
struct work_struct check_overtemp_task;
u32 interrupt_event;
+ char lsc_int_name[IFNAMSIZ + 9];
/* SR-IOV */
DECLARE_BITMAP(active_vfs, IXGBE_MAX_VF_FUNCTIONS);
@@ -428,17 +468,25 @@ enum ixbge_state_t {
__IXGBE_TESTING,
__IXGBE_RESETTING,
__IXGBE_DOWN,
- __IXGBE_FDIR_INIT_DONE,
__IXGBE_SFP_MODULE_NOT_FOUND
};
+struct ixgbe_rsc_cb {
+ dma_addr_t dma;
+ u16 skb_cnt;
+ bool delay_unmap;
+};
+#define IXGBE_RSC_CB(skb) ((struct ixgbe_rsc_cb *)(skb)->cb)
+
enum ixgbe_boards {
board_82598,
board_82599,
+ board_X540,
};
extern struct ixgbe_info ixgbe_82598_info;
extern struct ixgbe_info ixgbe_82599_info;
+extern struct ixgbe_info ixgbe_X540_info;
#ifdef CONFIG_IXGBE_DCB
extern const struct dcbnl_rtnl_ops dcbnl_ops;
extern int ixgbe_copy_dcb_cfg(struct ixgbe_dcb_config *src_dcb_cfg,
@@ -454,26 +502,24 @@ extern void ixgbe_down(struct ixgbe_adapter *adapter);
extern void ixgbe_reinit_locked(struct ixgbe_adapter *adapter);
extern void ixgbe_reset(struct ixgbe_adapter *adapter);
extern void ixgbe_set_ethtool_ops(struct net_device *netdev);
-extern int ixgbe_setup_rx_resources(struct ixgbe_adapter *, struct ixgbe_ring *);
-extern int ixgbe_setup_tx_resources(struct ixgbe_adapter *, struct ixgbe_ring *);
-extern void ixgbe_free_rx_resources(struct ixgbe_adapter *, struct ixgbe_ring *);
-extern void ixgbe_free_tx_resources(struct ixgbe_adapter *, struct ixgbe_ring *);
+extern int ixgbe_setup_rx_resources(struct ixgbe_ring *);
+extern int ixgbe_setup_tx_resources(struct ixgbe_ring *);
+extern void ixgbe_free_rx_resources(struct ixgbe_ring *);
+extern void ixgbe_free_tx_resources(struct ixgbe_ring *);
extern void ixgbe_configure_rx_ring(struct ixgbe_adapter *,struct ixgbe_ring *);
extern void ixgbe_configure_tx_ring(struct ixgbe_adapter *,struct ixgbe_ring *);
extern void ixgbe_update_stats(struct ixgbe_adapter *adapter);
extern int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter);
extern void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter);
extern netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *,
- struct net_device *,
struct ixgbe_adapter *,
struct ixgbe_ring *);
-extern void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *,
+extern void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *,
struct ixgbe_tx_buffer *);
-extern void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *rx_ring,
- int cleaned_count);
+extern void ixgbe_alloc_rx_buffers(struct ixgbe_ring *, u16);
extern void ixgbe_write_eitr(struct ixgbe_q_vector *);
extern int ethtool_ioctl(struct ifreq *ifr);
+extern u8 ixgbe_dcb_txq_to_tc(struct ixgbe_adapter *adapter, u8 index);
extern s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw);
extern s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 pballoc);
extern s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc);
@@ -498,6 +544,10 @@ extern s32 ixgbe_atr_set_flex_byte_82599(struct ixgbe_atr_input *input,
u16 flex_byte);
extern s32 ixgbe_atr_set_l4type_82599(struct ixgbe_atr_input *input,
u8 l4type);
+extern void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
+ struct ixgbe_ring *ring);
+extern void ixgbe_clear_rscctl(struct ixgbe_adapter *adapter,
+ struct ixgbe_ring *ring);
extern void ixgbe_set_rx_mode(struct net_device *netdev);
#ifdef IXGBE_FCOE
extern void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter);
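
The new ring->state bitmap replaces the old per-ring flags byte and the separate reinit_state field; the test/set/clear helpers above wrap atomic bitops so packet-split, RSC and hang-check state can be flipped per ring without extra locking. A user-space model of the pattern, using plain (non-atomic) mask operations for brevity:

#include <stdio.h>

enum ring_state { RING_PS_ENABLED, RING_RSC_ENABLED, RING_TX_DETECT_HANG };

struct ring { unsigned long state; };

#define ring_is_rsc_enabled(r)    ((r)->state & (1UL << RING_RSC_ENABLED))
#define set_ring_rsc_enabled(r)   ((r)->state |= (1UL << RING_RSC_ENABLED))
#define clear_ring_rsc_enabled(r) ((r)->state &= ~(1UL << RING_RSC_ENABLED))

int main(void)
{
	struct ring rx = { 0 };

	set_ring_rsc_enabled(&rx);
	printf("rsc: %lu\n", ring_is_rsc_enabled(&rx) ? 1UL : 0UL);
	clear_ring_rsc_enabled(&rx);
	printf("rsc: %lu\n", ring_is_rsc_enabled(&rx) ? 1UL : 0UL);
	return 0;
}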
diff --git a/drivers/net/ixgbe/ixgbe_82598.c b/drivers/net/ixgbe/ixgbe_82598.c
index 9c02d6014cc4..d0f1d9d2c416 100644
--- a/drivers/net/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ixgbe/ixgbe_82598.c
@@ -38,9 +38,6 @@
#define IXGBE_82598_MC_TBL_SIZE 128
#define IXGBE_82598_VFT_TBL_SIZE 128
-static s32 ixgbe_get_copper_link_capabilities_82598(struct ixgbe_hw *hw,
- ixgbe_link_speed *speed,
- bool *autoneg);
static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg,
@@ -156,7 +153,7 @@ static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
mac->ops.setup_link = &ixgbe_setup_copper_link_82598;
mac->ops.get_link_capabilities =
- &ixgbe_get_copper_link_capabilities_82598;
+ &ixgbe_get_copper_link_capabilities_generic;
}
switch (hw->phy.type) {
@@ -274,37 +271,6 @@ static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
}
/**
- * ixgbe_get_copper_link_capabilities_82598 - Determines link capabilities
- * @hw: pointer to hardware structure
- * @speed: pointer to link speed
- * @autoneg: boolean auto-negotiation value
- *
- * Determines the link capabilities by reading the AUTOC register.
- **/
-static s32 ixgbe_get_copper_link_capabilities_82598(struct ixgbe_hw *hw,
- ixgbe_link_speed *speed,
- bool *autoneg)
-{
- s32 status = IXGBE_ERR_LINK_SETUP;
- u16 speed_ability;
-
- *speed = 0;
- *autoneg = true;
-
- status = hw->phy.ops.read_reg(hw, MDIO_SPEED, MDIO_MMD_PMAPMD,
- &speed_ability);
-
- if (status == 0) {
- if (speed_ability & MDIO_SPEED_10G)
- *speed |= IXGBE_LINK_SPEED_10GB_FULL;
- if (speed_ability & MDIO_PMA_SPEED_1000)
- *speed |= IXGBE_LINK_SPEED_1GB_FULL;
- }
-
- return status;
-}
-
-/**
* ixgbe_get_media_type_82598 - Determines media type
* @hw: pointer to hardware structure
*
@@ -357,6 +323,7 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
u32 fctrl_reg;
u32 rmcs_reg;
u32 reg;
+ u32 rx_pba_size;
u32 link_speed = 0;
bool link_up;
@@ -459,16 +426,18 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
/* Set up and enable Rx high/low water mark thresholds, enable XON. */
if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
- if (hw->fc.send_xon) {
- IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num),
- (hw->fc.low_water | IXGBE_FCRTL_XONE));
- } else {
- IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num),
- hw->fc.low_water);
- }
+ rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(packetbuf_num));
+ rx_pba_size >>= IXGBE_RXPBSIZE_SHIFT;
+
+ reg = (rx_pba_size - hw->fc.low_water) << 6;
+ if (hw->fc.send_xon)
+ reg |= IXGBE_FCRTL_XONE;
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num), reg);
+
+ reg = (rx_pba_size - hw->fc.high_water) << 10;
+ reg |= IXGBE_FCRTH_FCEN;
- IXGBE_WRITE_REG(hw, IXGBE_FCRTH(packetbuf_num),
- (hw->fc.high_water | IXGBE_FCRTH_FCEN));
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTH(packetbuf_num), reg);
}
/* Configure pause time (2 TCs per register) */
@@ -1222,6 +1191,7 @@ static struct ixgbe_mac_operations mac_ops_82598 = {
static struct ixgbe_eeprom_operations eeprom_ops_82598 = {
.init_params = &ixgbe_init_eeprom_params_generic,
.read = &ixgbe_read_eerd_generic,
+ .calc_checksum = &ixgbe_calc_eeprom_checksum_generic,
.validate_checksum = &ixgbe_validate_eeprom_checksum_generic,
.update_checksum = &ixgbe_update_eeprom_checksum_generic,
};
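
The rewritten threshold code derives the XOFF/XON points from the actual packet-buffer size instead of the old fixed defaults: RXPBSIZE is shifted down by IXGBE_RXPBSIZE_SHIFT to kilobytes, fc.low_water/high_water act as headroom below the top of the buffer, and the result is shifted back up into register units. A worked calculation under assumed values (512 KB buffer, 16/8 KB watermarks, both shifts taken as 10 as in the common-code path later in this series):

#include <stdio.h>

#define IXGBE_RXPBSIZE_SHIFT 10	/* register units -> KB */

int main(void)
{
	unsigned int rx_pba_size = 0x00080000 >> IXGBE_RXPBSIZE_SHIFT; /* 512 KB */
	unsigned int low_water = 16, high_water = 8;	/* assumed KB headroom */

	unsigned int fcrtl = (rx_pba_size - low_water) << 10;	/* 507904 */
	unsigned int fcrth = (rx_pba_size - high_water) << 10;	/* 516096 */

	printf("FCRTL=%u FCRTH=%u\n", fcrtl, fcrth);
	return 0;
}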
diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c
index 0bd8fbb5bfd0..e34643eef162 100644
--- a/drivers/net/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ixgbe/ixgbe_82599.c
@@ -56,9 +56,6 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg,
bool autoneg_wait_to_complete);
-static s32 ixgbe_get_copper_link_capabilities_82599(struct ixgbe_hw *hw,
- ixgbe_link_speed *speed,
- bool *autoneg);
static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg,
@@ -174,7 +171,7 @@ static s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
mac->ops.setup_link = &ixgbe_setup_copper_link_82599;
mac->ops.get_link_capabilities =
- &ixgbe_get_copper_link_capabilities_82599;
+ &ixgbe_get_copper_link_capabilities_generic;
}
/* Set necessary function pointers based on phy type */
@@ -184,6 +181,10 @@ static s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
phy->ops.get_firmware_version =
&ixgbe_get_phy_firmware_version_tnx;
break;
+ case ixgbe_phy_aq:
+ phy->ops.get_firmware_version =
+ &ixgbe_get_phy_firmware_version_generic;
+ break;
default:
break;
}
@@ -290,37 +291,6 @@ out:
}
/**
- * ixgbe_get_copper_link_capabilities_82599 - Determines link capabilities
- * @hw: pointer to hardware structure
- * @speed: pointer to link speed
- * @autoneg: boolean auto-negotiation value
- *
- * Determines the link capabilities by reading the AUTOC register.
- **/
-static s32 ixgbe_get_copper_link_capabilities_82599(struct ixgbe_hw *hw,
- ixgbe_link_speed *speed,
- bool *autoneg)
-{
- s32 status = IXGBE_ERR_LINK_SETUP;
- u16 speed_ability;
-
- *speed = 0;
- *autoneg = true;
-
- status = hw->phy.ops.read_reg(hw, MDIO_SPEED, MDIO_MMD_PMAPMD,
- &speed_ability);
-
- if (status == 0) {
- if (speed_ability & MDIO_SPEED_10G)
- *speed |= IXGBE_LINK_SPEED_10GB_FULL;
- if (speed_ability & MDIO_PMA_SPEED_1000)
- *speed |= IXGBE_LINK_SPEED_1GB_FULL;
- }
-
- return status;
-}
-
-/**
* ixgbe_get_media_type_82599 - Get media type
* @hw: pointer to hardware structure
*
@@ -332,7 +302,8 @@ static enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
/* Detect if there is a copper PHY attached. */
if (hw->phy.type == ixgbe_phy_cu_unknown ||
- hw->phy.type == ixgbe_phy_tn) {
+ hw->phy.type == ixgbe_phy_tn ||
+ hw->phy.type == ixgbe_phy_aq) {
media_type = ixgbe_media_type_copper;
goto out;
}
@@ -1924,6 +1895,7 @@ static u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw)
hw->phy.ops.identify(hw);
if (hw->phy.type == ixgbe_phy_tn ||
+ hw->phy.type == ixgbe_phy_aq ||
hw->phy.type == ixgbe_phy_cu_unknown) {
hw->phy.ops.read_reg(hw, MDIO_PMA_EXTABLE, MDIO_MMD_PMAPMD,
&ext_ability);
@@ -2125,51 +2097,6 @@ fw_version_out:
return status;
}
-/**
- * ixgbe_get_wwn_prefix_82599 - Get alternative WWNN/WWPN prefix from
- * the EEPROM
- * @hw: pointer to hardware structure
- * @wwnn_prefix: the alternative WWNN prefix
- * @wwpn_prefix: the alternative WWPN prefix
- *
- * This function will read the EEPROM from the alternative SAN MAC address
- * block to check the support for the alternative WWNN/WWPN prefix support.
- **/
-static s32 ixgbe_get_wwn_prefix_82599(struct ixgbe_hw *hw, u16 *wwnn_prefix,
- u16 *wwpn_prefix)
-{
- u16 offset, caps;
- u16 alt_san_mac_blk_offset;
-
- /* clear output first */
- *wwnn_prefix = 0xFFFF;
- *wwpn_prefix = 0xFFFF;
-
- /* check if alternative SAN MAC is supported */
- hw->eeprom.ops.read(hw, IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR,
- &alt_san_mac_blk_offset);
-
- if ((alt_san_mac_blk_offset == 0) ||
- (alt_san_mac_blk_offset == 0xFFFF))
- goto wwn_prefix_out;
-
- /* check capability in alternative san mac address block */
- offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET;
- hw->eeprom.ops.read(hw, offset, &caps);
- if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN))
- goto wwn_prefix_out;
-
- /* get the corresponding prefix for WWNN/WWPN */
- offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET;
- hw->eeprom.ops.read(hw, offset, wwnn_prefix);
-
- offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET;
- hw->eeprom.ops.read(hw, offset, wwpn_prefix);
-
-wwn_prefix_out:
- return 0;
-}
-
static struct ixgbe_mac_operations mac_ops_82599 = {
.init_hw = &ixgbe_init_hw_generic,
.reset_hw = &ixgbe_reset_hw_82599,
@@ -2181,7 +2108,7 @@ static struct ixgbe_mac_operations mac_ops_82599 = {
.get_mac_addr = &ixgbe_get_mac_addr_generic,
.get_san_mac_addr = &ixgbe_get_san_mac_addr_generic,
.get_device_caps = &ixgbe_get_device_caps_82599,
- .get_wwn_prefix = &ixgbe_get_wwn_prefix_82599,
+ .get_wwn_prefix = &ixgbe_get_wwn_prefix_generic,
.stop_adapter = &ixgbe_stop_adapter_generic,
.get_bus_info = &ixgbe_get_bus_info_generic,
.set_lan_id = &ixgbe_set_lan_id_multi_port_pcie,
@@ -2214,6 +2141,7 @@ static struct ixgbe_eeprom_operations eeprom_ops_82599 = {
.init_params = &ixgbe_init_eeprom_params_generic,
.read = &ixgbe_read_eerd_generic,
.write = &ixgbe_write_eeprom_generic,
+ .calc_checksum = &ixgbe_calc_eeprom_checksum_generic,
.validate_checksum = &ixgbe_validate_eeprom_checksum_generic,
.update_checksum = &ixgbe_update_eeprom_checksum_generic,
};
@@ -2240,5 +2168,5 @@ struct ixgbe_info ixgbe_82599_info = {
.mac_ops = &mac_ops_82599,
.eeprom_ops = &eeprom_ops_82599,
.phy_ops = &phy_ops_82599,
- .mbx_ops = &mbx_ops_82599,
+ .mbx_ops = &mbx_ops_generic,
};
diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c
index e3eca1316389..56052570cac5 100644
--- a/drivers/net/ixgbe/ixgbe_common.c
+++ b/drivers/net/ixgbe/ixgbe_common.c
@@ -45,14 +45,12 @@ static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count);
static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
static void ixgbe_release_eeprom(struct ixgbe_hw *hw);
-static u16 ixgbe_calc_eeprom_checksum(struct ixgbe_hw *hw);
static void ixgbe_enable_rar(struct ixgbe_hw *hw, u32 index);
static void ixgbe_disable_rar(struct ixgbe_hw *hw, u32 index);
static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr);
static void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq);
static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num);
-static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg);
/**
* ixgbe_start_hw_generic - Prepare hardware for Tx/Rx
@@ -638,7 +636,7 @@ out:
* Polls the status bit (bit 1) of the EERD or EEWR to determine when the
* read or write is done respectively.
**/
-static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
+s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
{
u32 i;
u32 reg;
@@ -1009,7 +1007,7 @@ static void ixgbe_release_eeprom(struct ixgbe_hw *hw)
* ixgbe_calc_eeprom_checksum - Calculates and returns the checksum
* @hw: pointer to hardware structure
**/
-static u16 ixgbe_calc_eeprom_checksum(struct ixgbe_hw *hw)
+u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
{
u16 i;
u16 j;
@@ -1072,7 +1070,7 @@ s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
status = hw->eeprom.ops.read(hw, 0, &checksum);
if (status == 0) {
- checksum = ixgbe_calc_eeprom_checksum(hw);
+ checksum = hw->eeprom.ops.calc_checksum(hw);
hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum);
@@ -1110,7 +1108,7 @@ s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
status = hw->eeprom.ops.read(hw, 0, &checksum);
if (status == 0) {
- checksum = ixgbe_calc_eeprom_checksum(hw);
+ checksum = hw->eeprom.ops.calc_checksum(hw);
status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM,
checksum);
} else {
@@ -1595,6 +1593,7 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num)
u32 mflcn_reg, fccfg_reg;
u32 reg;
u32 rx_pba_size;
+ u32 fcrtl, fcrth;
#ifdef CONFIG_DCB
if (hw->fc.requested_mode == ixgbe_fc_pfc)
@@ -1671,41 +1670,21 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num)
IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);
- reg = IXGBE_READ_REG(hw, IXGBE_MTQC);
- /* Thresholds are different for link flow control when in DCB mode */
- if (reg & IXGBE_MTQC_RT_ENA) {
- rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(packetbuf_num));
+ rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(packetbuf_num));
+ rx_pba_size >>= IXGBE_RXPBSIZE_SHIFT;
- /* Always disable XON for LFC when in DCB mode */
- reg = (rx_pba_size >> 5) & 0xFFE0;
- IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(packetbuf_num), reg);
+ fcrth = (rx_pba_size - hw->fc.high_water) << 10;
+ fcrtl = (rx_pba_size - hw->fc.low_water) << 10;
- reg = (rx_pba_size >> 2) & 0xFFE0;
- if (hw->fc.current_mode & ixgbe_fc_tx_pause)
- reg |= IXGBE_FCRTH_FCEN;
- IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(packetbuf_num), reg);
- } else {
- /*
- * Set up and enable Rx high/low water mark thresholds,
- * enable XON.
- */
- if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
- if (hw->fc.send_xon) {
- IXGBE_WRITE_REG(hw,
- IXGBE_FCRTL_82599(packetbuf_num),
- (hw->fc.low_water |
- IXGBE_FCRTL_XONE));
- } else {
- IXGBE_WRITE_REG(hw,
- IXGBE_FCRTL_82599(packetbuf_num),
- hw->fc.low_water);
- }
-
- IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(packetbuf_num),
- (hw->fc.high_water | IXGBE_FCRTH_FCEN));
- }
+ if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
+ fcrth |= IXGBE_FCRTH_FCEN;
+ if (hw->fc.send_xon)
+ fcrtl |= IXGBE_FCRTL_XONE;
}
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(packetbuf_num), fcrth);
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(packetbuf_num), fcrtl);
+
/* Configure pause time (2 TCs per register) */
reg = IXGBE_READ_REG(hw, IXGBE_FCTTV(packetbuf_num / 2));
if ((packetbuf_num & 1) == 0)
@@ -2705,3 +2684,48 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
return 0;
}
+
+/**
+ * ixgbe_get_wwn_prefix_generic - Get alternative WWNN/WWPN prefix from
+ * the EEPROM
+ * @hw: pointer to hardware structure
+ * @wwnn_prefix: the alternative WWNN prefix
+ * @wwpn_prefix: the alternative WWPN prefix
+ *
+ * This function will read the EEPROM from the alternative SAN MAC address
+ * block to check the support for the alternative WWNN/WWPN prefix support.
+ **/
+s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
+ u16 *wwpn_prefix)
+{
+ u16 offset, caps;
+ u16 alt_san_mac_blk_offset;
+
+ /* clear output first */
+ *wwnn_prefix = 0xFFFF;
+ *wwpn_prefix = 0xFFFF;
+
+ /* check if alternative SAN MAC is supported */
+ hw->eeprom.ops.read(hw, IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR,
+ &alt_san_mac_blk_offset);
+
+ if ((alt_san_mac_blk_offset == 0) ||
+ (alt_san_mac_blk_offset == 0xFFFF))
+ goto wwn_prefix_out;
+
+ /* check capability in alternative san mac address block */
+ offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET;
+ hw->eeprom.ops.read(hw, offset, &caps);
+ if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN))
+ goto wwn_prefix_out;
+
+ /* get the corresponding prefix for WWNN/WWPN */
+ offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET;
+ hw->eeprom.ops.read(hw, offset, wwnn_prefix);
+
+ offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET;
+ hw->eeprom.ops.read(hw, offset, wwpn_prefix);
+
+wwn_prefix_out:
+ return 0;
+}
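
Exporting ixgbe_calc_eeprom_checksum_generic() and routing callers through hw->eeprom.ops.calc_checksum lets the new X540 code supply its own checksum routine while reusing the generic validate/update logic. A minimal model of that indirection (structure names shortened; 0xBABA is the IXGBE_EEPROM_SUM target the driver sums toward, and the summing itself is faked here):

#include <stdio.h>

#define IXGBE_EEPROM_SUM 0xBABA

struct eeprom_ops {
	unsigned short (*calc_checksum)(void *hw);
};

struct hw_model {
	struct eeprom_ops ops;
};

/* stand-in for ixgbe_calc_eeprom_checksum_generic() */
static unsigned short calc_checksum_generic(void *hw)
{
	(void)hw;
	return IXGBE_EEPROM_SUM;	/* pretend the EEPROM words sum correctly */
}

static int validate_checksum(struct hw_model *hw)
{
	/* the generic path no longer hardcodes one implementation */
	return hw->ops.calc_checksum(hw) == IXGBE_EEPROM_SUM ? 0 : -1;
}

int main(void)
{
	struct hw_model hw = { { calc_checksum_generic } };
	printf("validate: %d\n", validate_checksum(&hw));
	return 0;
}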
diff --git a/drivers/net/ixgbe/ixgbe_common.h b/drivers/net/ixgbe/ixgbe_common.h
index 424c223437dc..341ca514a281 100644
--- a/drivers/net/ixgbe/ixgbe_common.h
+++ b/drivers/net/ixgbe/ixgbe_common.h
@@ -49,9 +49,11 @@ s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data);
s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data);
s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
u16 *data);
+u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw);
s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
u16 *checksum_val);
s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw);
+s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg);
s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
u32 enable_addr);
@@ -81,7 +83,8 @@ s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw);
s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw,
ixgbe_link_speed *speed,
bool *link_up, bool link_up_wait_to_complete);
-
+s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
+ u16 *wwpn_prefix);
s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index);
s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index);
diff --git a/drivers/net/ixgbe/ixgbe_dcb.c b/drivers/net/ixgbe/ixgbe_dcb.c
index 0d44c6470ca3..d16c260c1f50 100644
--- a/drivers/net/ixgbe/ixgbe_dcb.c
+++ b/drivers/net/ixgbe/ixgbe_dcb.c
@@ -42,7 +42,8 @@
* It should be called only after the rules are checked by
* ixgbe_dcb_check_config().
*/
-s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_dcb_config *dcb_config,
+s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *hw,
+ struct ixgbe_dcb_config *dcb_config,
int max_frame, u8 direction)
{
struct tc_bw_alloc *p;
@@ -124,7 +125,8 @@ s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_dcb_config *dcb_config,
* credit may not be enough to send out a TSO
* packet in descriptor plane arbitration.
*/
- if (credit_max &&
+ if ((hw->mac.type == ixgbe_mac_82598EB) &&
+ credit_max &&
(credit_max < MINIMUM_CREDIT_FOR_TSO))
credit_max = MINIMUM_CREDIT_FOR_TSO;
@@ -150,10 +152,17 @@ s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw,
struct ixgbe_dcb_config *dcb_config)
{
s32 ret = 0;
- if (hw->mac.type == ixgbe_mac_82598EB)
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
ret = ixgbe_dcb_hw_config_82598(hw, dcb_config);
- else if (hw->mac.type == ixgbe_mac_82599EB)
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
ret = ixgbe_dcb_hw_config_82599(hw, dcb_config);
+ break;
+ default:
+ break;
+ }
return ret;
}
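
Passing the hw pointer into ixgbe_dcb_calculate_tc_credits() lets the TSO minimum-credit clamp apply only where it is needed: the clamp works around an 82598 descriptor-plane arbitration limit that the newer MACs do not appear to need. A sketch of the gated clamp, with the MINIMUM_CREDIT_FOR_TSO value assumed for illustration:

#include <stdio.h>

#define MINIMUM_CREDIT_FOR_TSO 513	/* assumed; derived from max TSO size */

enum mac_type { MAC_82598EB, MAC_82599EB, MAC_X540 };

static int clamp_tso_credit(enum mac_type type, int credit_max)
{
	/* only 82598 needs the floor so a full TSO frame can always drain */
	if (type == MAC_82598EB && credit_max &&
	    credit_max < MINIMUM_CREDIT_FOR_TSO)
		credit_max = MINIMUM_CREDIT_FOR_TSO;
	return credit_max;
}

int main(void)
{
	printf("82598: %d\n", clamp_tso_credit(MAC_82598EB, 100));
	printf("82599: %d\n", clamp_tso_credit(MAC_82599EB, 100));
	return 0;
}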
diff --git a/drivers/net/ixgbe/ixgbe_dcb.h b/drivers/net/ixgbe/ixgbe_dcb.h
index 0208a87b129e..1cfe38ee1644 100644
--- a/drivers/net/ixgbe/ixgbe_dcb.h
+++ b/drivers/net/ixgbe/ixgbe_dcb.h
@@ -150,7 +150,8 @@ struct ixgbe_dcb_config {
/* DCB driver APIs */
/* DCB credits calculation */
-s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_dcb_config *, int, u8);
+s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *,
+ struct ixgbe_dcb_config *, int, u8);
/* DCB hw initialization */
s32 ixgbe_dcb_hw_config(struct ixgbe_hw *, struct ixgbe_dcb_config *);
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82598.c b/drivers/net/ixgbe/ixgbe_dcb_82598.c
index 50288bcadc59..9a5e89c12e05 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_82598.c
+++ b/drivers/net/ixgbe/ixgbe_dcb_82598.c
@@ -256,21 +256,17 @@ s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw,
* for each traffic class.
*/
for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
- if (dcb_config->rx_pba_cfg == pba_equal) {
- rx_pba_size = IXGBE_RXPBSIZE_64KB;
- } else {
- rx_pba_size = (i < 4) ? IXGBE_RXPBSIZE_80KB
- : IXGBE_RXPBSIZE_48KB;
- }
+ rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
+ rx_pba_size >>= IXGBE_RXPBSIZE_SHIFT;
+ reg = (rx_pba_size - hw->fc.low_water) << 10;
- reg = ((rx_pba_size >> 5) & 0xFFF0);
if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx ||
dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full)
reg |= IXGBE_FCRTL_XONE;
IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), reg);
- reg = ((rx_pba_size >> 2) & 0xFFF0);
+ reg = (rx_pba_size - hw->fc.high_water) << 10;
if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx ||
dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full)
reg |= IXGBE_FCRTH_FCEN;
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82599.c b/drivers/net/ixgbe/ixgbe_dcb_82599.c
index 05f224715073..374e1f74d0f5 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_82599.c
+++ b/drivers/net/ixgbe/ixgbe_dcb_82599.c
@@ -251,19 +251,17 @@ s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw,
/* Configure PFC Tx thresholds per TC */
for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
- if (dcb_config->rx_pba_cfg == pba_equal)
- rx_pba_size = IXGBE_RXPBSIZE_64KB;
- else
- rx_pba_size = (i < 4) ? IXGBE_RXPBSIZE_80KB
- : IXGBE_RXPBSIZE_48KB;
+ rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
+ rx_pba_size >>= IXGBE_RXPBSIZE_SHIFT;
+
+ reg = (rx_pba_size - hw->fc.low_water) << 10;
- reg = ((rx_pba_size >> 5) & 0xFFE0);
if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full ||
dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx)
reg |= IXGBE_FCRTL_XONE;
IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), reg);
- reg = ((rx_pba_size >> 2) & 0xFFE0);
+ reg = (rx_pba_size - hw->fc.high_water) << 10;
if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full ||
dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx)
reg |= IXGBE_FCRTH_FCEN;
diff --git a/drivers/net/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ixgbe/ixgbe_dcb_nl.c
index b53b465e24af..bf566e8a455e 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ixgbe/ixgbe_dcb_nl.c
@@ -130,15 +130,21 @@ static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state)
netdev->netdev_ops->ndo_stop(netdev);
ixgbe_clear_interrupt_scheme(adapter);
- if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+ adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82598EB:
adapter->last_lfc_mode = adapter->hw.fc.current_mode;
adapter->hw.fc.requested_mode = ixgbe_fc_none;
- }
- adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
- if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
+ break;
+ default:
+ break;
}
+
adapter->flags |= IXGBE_FLAG_DCB_ENABLED;
ixgbe_init_interrupt_scheme(adapter);
if (netif_running(netdev))
@@ -155,8 +161,14 @@ static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state)
adapter->dcb_cfg.pfc_mode_enable = false;
adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
- if (adapter->hw.mac.type == ixgbe_mac_82599EB)
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
+ break;
+ default:
+ break;
+ }
ixgbe_init_interrupt_scheme(adapter);
if (netif_running(netdev))
@@ -178,9 +190,14 @@ static void ixgbe_dcbnl_get_perm_hw_addr(struct net_device *netdev,
for (i = 0; i < netdev->addr_len; i++)
perm_addr[i] = adapter->hw.mac.perm_addr[i];
- if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
for (j = 0; j < netdev->addr_len; j++, i++)
perm_addr[i] = adapter->hw.mac.san_addr[j];
+ break;
+ default:
+ break;
}
}
@@ -366,15 +383,29 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
}
if (adapter->dcb_cfg.pfc_mode_enable) {
- if ((adapter->hw.mac.type != ixgbe_mac_82598EB) &&
- (adapter->hw.fc.current_mode != ixgbe_fc_pfc))
- adapter->last_lfc_mode = adapter->hw.fc.current_mode;
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ if (adapter->hw.fc.current_mode != ixgbe_fc_pfc)
+ adapter->last_lfc_mode =
+ adapter->hw.fc.current_mode;
+ break;
+ default:
+ break;
+ }
adapter->hw.fc.requested_mode = ixgbe_fc_pfc;
} else {
- if (adapter->hw.mac.type != ixgbe_mac_82598EB)
- adapter->hw.fc.requested_mode = adapter->last_lfc_mode;
- else
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82598EB:
adapter->hw.fc.requested_mode = ixgbe_fc_none;
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ adapter->hw.fc.requested_mode = adapter->last_lfc_mode;
+ break;
+ default:
+ break;
+ }
}
if (adapter->dcb_set_bitmap & BIT_RESETLINK) {
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index 3dc731c22ff2..f9b58394fbb6 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -185,6 +185,16 @@ static int ixgbe_get_settings(struct net_device *netdev,
ADVERTISED_FIBRE);
ecmd->port = PORT_FIBRE;
ecmd->autoneg = AUTONEG_DISABLE;
+ } else if ((hw->device_id == IXGBE_DEV_ID_82599_COMBO_BACKPLANE) ||
+ (hw->device_id == IXGBE_DEV_ID_82599_KX4_MEZZ)) {
+ ecmd->supported |= (SUPPORTED_1000baseT_Full |
+ SUPPORTED_Autoneg |
+ SUPPORTED_FIBRE);
+ ecmd->advertising = (ADVERTISED_10000baseT_Full |
+ ADVERTISED_1000baseT_Full |
+ ADVERTISED_Autoneg |
+ ADVERTISED_FIBRE);
+ ecmd->port = PORT_FIBRE;
} else {
ecmd->supported |= (SUPPORTED_1000baseT_Full |
SUPPORTED_FIBRE);
@@ -204,6 +214,7 @@ static int ixgbe_get_settings(struct net_device *netdev,
/* Get PHY type */
switch (adapter->hw.phy.type) {
case ixgbe_phy_tn:
+ case ixgbe_phy_aq:
case ixgbe_phy_cu_unknown:
/* Copper 10G-BASET */
ecmd->port = PORT_TP;
@@ -332,13 +343,6 @@ static void ixgbe_get_pauseparam(struct net_device *netdev,
else
pause->autoneg = 1;
-#ifdef CONFIG_DCB
- if (hw->fc.current_mode == ixgbe_fc_pfc) {
- pause->rx_pause = 0;
- pause->tx_pause = 0;
- }
-
-#endif
if (hw->fc.current_mode == ixgbe_fc_rx_pause) {
pause->rx_pause = 1;
} else if (hw->fc.current_mode == ixgbe_fc_tx_pause) {
@@ -346,6 +350,11 @@ static void ixgbe_get_pauseparam(struct net_device *netdev,
} else if (hw->fc.current_mode == ixgbe_fc_full) {
pause->rx_pause = 1;
pause->tx_pause = 1;
+#ifdef CONFIG_DCB
+ } else if (hw->fc.current_mode == ixgbe_fc_pfc) {
+ pause->rx_pause = 0;
+ pause->tx_pause = 0;
+#endif
}
}
@@ -363,7 +372,6 @@ static int ixgbe_set_pauseparam(struct net_device *netdev,
return -EINVAL;
#endif
-
fc = hw->fc;
if (pause->autoneg != AUTONEG_ENABLE)
@@ -412,11 +420,6 @@ static int ixgbe_set_rx_csum(struct net_device *netdev, u32 data)
else
adapter->flags &= ~IXGBE_FLAG_RX_CSUM_ENABLED;
- if (netif_running(netdev))
- ixgbe_reinit_locked(adapter);
- else
- ixgbe_reset(adapter);
-
return 0;
}
@@ -428,16 +431,21 @@ static u32 ixgbe_get_tx_csum(struct net_device *netdev)
static int ixgbe_set_tx_csum(struct net_device *netdev, u32 data)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ u32 feature_list;
- if (data) {
- netdev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
- if (adapter->hw.mac.type == ixgbe_mac_82599EB)
- netdev->features |= NETIF_F_SCTP_CSUM;
- } else {
- netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
- if (adapter->hw.mac.type == ixgbe_mac_82599EB)
- netdev->features &= ~NETIF_F_SCTP_CSUM;
+ feature_list = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ feature_list |= NETIF_F_SCTP_CSUM;
+ break;
+ default:
+ break;
}
+ if (data)
+ netdev->features |= feature_list;
+ else
+ netdev->features &= ~feature_list;
return 0;
}
@@ -530,10 +538,20 @@ static void ixgbe_get_regs(struct net_device *netdev,
regs_buff[32] = IXGBE_READ_REG(hw, IXGBE_FCTTV(1));
regs_buff[33] = IXGBE_READ_REG(hw, IXGBE_FCTTV(2));
regs_buff[34] = IXGBE_READ_REG(hw, IXGBE_FCTTV(3));
- for (i = 0; i < 8; i++)
- regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL(i));
- for (i = 0; i < 8; i++)
- regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH(i));
+ for (i = 0; i < 8; i++) {
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL(i));
+ regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH(i));
+ break;
+ case ixgbe_mac_82599EB:
+ regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL_82599(i));
+ regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
+ break;
+ default:
+ break;
+ }
+ }
regs_buff[51] = IXGBE_READ_REG(hw, IXGBE_FCRTV);
regs_buff[52] = IXGBE_READ_REG(hw, IXGBE_TFCS);
@@ -615,6 +633,7 @@ static void ixgbe_get_regs(struct net_device *netdev,
regs_buff[827] = IXGBE_READ_REG(hw, IXGBE_WUPM);
regs_buff[828] = IXGBE_READ_REG(hw, IXGBE_FHFT(0));
+ /* DCB */
regs_buff[829] = IXGBE_READ_REG(hw, IXGBE_RMCS);
regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_DPMCS);
regs_buff[831] = IXGBE_READ_REG(hw, IXGBE_PDPMCS);
@@ -905,13 +924,11 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
memcpy(&temp_tx_ring[i], adapter->tx_ring[i],
sizeof(struct ixgbe_ring));
temp_tx_ring[i].count = new_tx_count;
- err = ixgbe_setup_tx_resources(adapter,
- &temp_tx_ring[i]);
+ err = ixgbe_setup_tx_resources(&temp_tx_ring[i]);
if (err) {
while (i) {
i--;
- ixgbe_free_tx_resources(adapter,
- &temp_tx_ring[i]);
+ ixgbe_free_tx_resources(&temp_tx_ring[i]);
}
goto clear_reset;
}
@@ -930,13 +947,11 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
memcpy(&temp_rx_ring[i], adapter->rx_ring[i],
sizeof(struct ixgbe_ring));
temp_rx_ring[i].count = new_rx_count;
- err = ixgbe_setup_rx_resources(adapter,
- &temp_rx_ring[i]);
+ err = ixgbe_setup_rx_resources(&temp_rx_ring[i]);
if (err) {
while (i) {
i--;
- ixgbe_free_rx_resources(adapter,
- &temp_rx_ring[i]);
+ ixgbe_free_rx_resources(&temp_rx_ring[i]);
}
goto err_setup;
}
@@ -951,8 +966,7 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
/* tx */
if (new_tx_count != adapter->tx_ring_count) {
for (i = 0; i < adapter->num_tx_queues; i++) {
- ixgbe_free_tx_resources(adapter,
- adapter->tx_ring[i]);
+ ixgbe_free_tx_resources(adapter->tx_ring[i]);
memcpy(adapter->tx_ring[i], &temp_tx_ring[i],
sizeof(struct ixgbe_ring));
}
@@ -962,8 +976,7 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
/* rx */
if (new_rx_count != adapter->rx_ring_count) {
for (i = 0; i < adapter->num_rx_queues; i++) {
- ixgbe_free_rx_resources(adapter,
- adapter->rx_ring[i]);
+ ixgbe_free_rx_resources(adapter->rx_ring[i]);
memcpy(adapter->rx_ring[i], &temp_rx_ring[i],
sizeof(struct ixgbe_ring));
}
@@ -1237,12 +1250,20 @@ static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
u32 value, before, after;
u32 i, toggle;
- if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
- toggle = 0x7FFFF30F;
- test = reg_test_82599;
- } else {
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82598EB:
toggle = 0x7FFFF3FF;
test = reg_test_82598;
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ toggle = 0x7FFFF30F;
+ test = reg_test_82599;
+ break;
+ default:
+ *data = 1;
+ return 1;
+ break;
}
/*
@@ -1460,16 +1481,21 @@ static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
reg_ctl &= ~IXGBE_TXDCTL_ENABLE;
IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx), reg_ctl);
- if (hw->mac.type == ixgbe_mac_82599EB) {
+ switch (hw->mac.type) {
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
reg_ctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
reg_ctl &= ~IXGBE_DMATXCTL_TE;
IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_ctl);
+ break;
+ default:
+ break;
}
ixgbe_reset(adapter);
- ixgbe_free_tx_resources(adapter, &adapter->test_tx_ring);
- ixgbe_free_rx_resources(adapter, &adapter->test_rx_ring);
+ ixgbe_free_tx_resources(&adapter->test_tx_ring);
+ ixgbe_free_rx_resources(&adapter->test_rx_ring);
}
static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
@@ -1483,17 +1509,24 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
/* Setup Tx descriptor ring and Tx buffers */
tx_ring->count = IXGBE_DEFAULT_TXD;
tx_ring->queue_index = 0;
+ tx_ring->dev = &adapter->pdev->dev;
+ tx_ring->netdev = adapter->netdev;
tx_ring->reg_idx = adapter->tx_ring[0]->reg_idx;
tx_ring->numa_node = adapter->node;
- err = ixgbe_setup_tx_resources(adapter, tx_ring);
+ err = ixgbe_setup_tx_resources(tx_ring);
if (err)
return 1;
- if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_DMATXCTL);
reg_data |= IXGBE_DMATXCTL_TE;
IXGBE_WRITE_REG(&adapter->hw, IXGBE_DMATXCTL, reg_data);
+ break;
+ default:
+ break;
}
ixgbe_configure_tx_ring(adapter, tx_ring);
@@ -1501,11 +1534,13 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
/* Setup Rx Descriptor ring and Rx buffers */
rx_ring->count = IXGBE_DEFAULT_RXD;
rx_ring->queue_index = 0;
+ rx_ring->dev = &adapter->pdev->dev;
+ rx_ring->netdev = adapter->netdev;
rx_ring->reg_idx = adapter->rx_ring[0]->reg_idx;
rx_ring->rx_buf_len = IXGBE_RXBUFFER_2048;
rx_ring->numa_node = adapter->node;
- err = ixgbe_setup_rx_resources(adapter, rx_ring);
+ err = ixgbe_setup_rx_resources(rx_ring);
if (err) {
ret_val = 4;
goto err_nomem;
@@ -1604,8 +1639,7 @@ static int ixgbe_check_lbtest_frame(struct sk_buff *skb,
return 13;
}
-static u16 ixgbe_clean_test_rings(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *rx_ring,
+static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring,
struct ixgbe_ring *tx_ring,
unsigned int size)
{
@@ -1627,7 +1661,7 @@ static u16 ixgbe_clean_test_rings(struct ixgbe_adapter *adapter,
rx_buffer_info = &rx_ring->rx_buffer_info[rx_ntc];
/* unmap Rx buffer, will be remapped by alloc_rx_buffers */
- dma_unmap_single(&adapter->pdev->dev,
+ dma_unmap_single(rx_ring->dev,
rx_buffer_info->dma,
bufsz,
DMA_FROM_DEVICE);
@@ -1639,7 +1673,7 @@ static u16 ixgbe_clean_test_rings(struct ixgbe_adapter *adapter,
/* unmap buffer on Tx side */
tx_buffer_info = &tx_ring->tx_buffer_info[tx_ntc];
- ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info);
+ ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
/* increment Rx/Tx next to clean counters */
rx_ntc++;
@@ -1655,7 +1689,7 @@ static u16 ixgbe_clean_test_rings(struct ixgbe_adapter *adapter,
}
/* re-map buffers to ring, store next to clean values */
- ixgbe_alloc_rx_buffers(adapter, rx_ring, count);
+ ixgbe_alloc_rx_buffers(rx_ring, count);
rx_ring->next_to_clean = rx_ntc;
tx_ring->next_to_clean = tx_ntc;
@@ -1699,7 +1733,6 @@ static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter)
for (i = 0; i < 64; i++) {
skb_get(skb);
tx_ret_val = ixgbe_xmit_frame_ring(skb,
- adapter->netdev,
adapter,
tx_ring);
if (tx_ret_val == NETDEV_TX_OK)
@@ -1714,8 +1747,7 @@ static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter)
/* allow 200 milliseconds for packets to go from Tx to Rx */
msleep(200);
- good_cnt = ixgbe_clean_test_rings(adapter, rx_ring,
- tx_ring, size);
+ good_cnt = ixgbe_clean_test_rings(rx_ring, tx_ring, size);
if (good_cnt != 64) {
ret_val = 13;
break;
@@ -1848,6 +1880,13 @@ static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter,
int retval = 1;
switch(hw->device_id) {
+ case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
+ /* All except this subdevice support WOL */
+ if (hw->subsystem_device_id ==
+ IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ) {
+ wol->supported = 0;
+ break;
+ }
case IXGBE_DEV_ID_82599_KX4:
retval = 0;
break;
@@ -1985,6 +2024,41 @@ static int ixgbe_get_coalesce(struct net_device *netdev,
return 0;
}
+/*
+ * this function must be called before setting the new value of
+ * rx_itr_setting
+ */
+static bool ixgbe_update_rsc(struct ixgbe_adapter *adapter,
+ struct ethtool_coalesce *ec)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE))
+ return false;
+
+ /* if interrupt rate is too high then disable RSC */
+ if (ec->rx_coalesce_usecs != 1 &&
+ ec->rx_coalesce_usecs <= 1000000/IXGBE_MAX_RSC_INT_RATE) {
+ if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
+ e_info(probe, "rx-usecs set too low, "
+ "disabling RSC\n");
+ adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
+ return true;
+ }
+ } else {
+ /* check the feature flag value and enable RSC if necessary */
+ if ((netdev->features & NETIF_F_LRO) &&
+ !(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) {
+ e_info(probe, "rx-usecs set to %d, "
+ "re-enabling RSC\n",
+ ec->rx_coalesce_usecs);
+ adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
+ return true;
+ }
+ }
+ return false;
+}
+
static int ixgbe_set_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ec)
{
@@ -2002,17 +2076,14 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
adapter->tx_ring[0]->work_limit = ec->tx_max_coalesced_frames_irq;
if (ec->rx_coalesce_usecs > 1) {
- u32 max_int;
- if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
- max_int = IXGBE_MAX_RSC_INT_RATE;
- else
- max_int = IXGBE_MAX_INT_RATE;
-
/* check the limits */
- if ((1000000/ec->rx_coalesce_usecs > max_int) ||
+ if ((1000000/ec->rx_coalesce_usecs > IXGBE_MAX_INT_RATE) ||
(1000000/ec->rx_coalesce_usecs < IXGBE_MIN_INT_RATE))
return -EINVAL;
+ /* check the old value and enable RSC if necessary */
+ need_reset = ixgbe_update_rsc(adapter, ec);
+
/* store the value in ints/second */
adapter->rx_eitr_param = 1000000/ec->rx_coalesce_usecs;
@@ -2021,32 +2092,21 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
/* clear the lower bit as its used for dynamic state */
adapter->rx_itr_setting &= ~1;
} else if (ec->rx_coalesce_usecs == 1) {
+ /* check the old value and enable RSC if necessary */
+ need_reset = ixgbe_update_rsc(adapter, ec);
+
/* 1 means dynamic mode */
adapter->rx_eitr_param = 20000;
adapter->rx_itr_setting = 1;
} else {
+ /* check the old value and enable RSC if necessary */
+ need_reset = ixgbe_update_rsc(adapter, ec);
/*
* any other value means disable eitr, which is best
* served by setting the interrupt rate very high
*/
adapter->rx_eitr_param = IXGBE_MAX_INT_RATE;
adapter->rx_itr_setting = 0;
-
- /*
- * if hardware RSC is enabled, disable it when
- * setting low latency mode, to avoid errata, assuming
- * that when the user set low latency mode they want
- * it at the cost of anything else
- */
- if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
- adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
- if (netdev->features & NETIF_F_LRO) {
- netdev->features &= ~NETIF_F_LRO;
- e_info(probe, "rx-usecs set to 0, "
- "disabling RSC\n");
- }
- need_reset = true;
- }
}
if (ec->tx_coalesce_usecs > 1) {
@@ -2133,28 +2193,39 @@ static int ixgbe_set_flags(struct net_device *netdev, u32 data)
return rc;
/* if state changes we need to update adapter->flags and reset */
- if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) {
- /*
- * cast both to bool and verify if they are set the same
- * but only enable RSC if itr is non-zero, as
- * itr=0 and RSC are mutually exclusive
- */
- if (((!!(data & ETH_FLAG_LRO)) !=
- (!!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))) &&
- adapter->rx_itr_setting) {
+ if ((adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) &&
+ (!!(data & ETH_FLAG_LRO) !=
+ !!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))) {
+ if ((data & ETH_FLAG_LRO) &&
+ (!adapter->rx_itr_setting ||
+ (adapter->rx_itr_setting > IXGBE_MAX_RSC_INT_RATE))) {
+ e_info(probe, "rx-usecs set too low, "
+ "not enabling RSC.\n");
+ } else {
adapter->flags2 ^= IXGBE_FLAG2_RSC_ENABLED;
switch (adapter->hw.mac.type) {
case ixgbe_mac_82599EB:
need_reset = true;
break;
+ case ixgbe_mac_X540: {
+ int i;
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ struct ixgbe_ring *ring =
+ adapter->rx_ring[i];
+ if (adapter->flags2 &
+ IXGBE_FLAG2_RSC_ENABLED) {
+ ixgbe_configure_rscctl(adapter,
+ ring);
+ } else {
+ ixgbe_clear_rscctl(adapter,
+ ring);
+ }
+ }
+ }
+ break;
default:
break;
}
- } else if (!adapter->rx_itr_setting) {
- netdev->features &= ~NETIF_F_LRO;
- if (data & ETH_FLAG_LRO)
- e_info(probe, "rx-usecs set to 0, "
- "LRO/RSC cannot be enabled.\n");
}
}
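
ixgbe_update_rsc() centralizes the rule that RSC and very low interrupt latencies are mutually exclusive: any rx-usecs value other than the dynamic setting of 1 that pushes the interrupt rate above IXGBE_MAX_RSC_INT_RATE turns RSC off, and raising rx-usecs back re-enables it if NETIF_F_LRO is still set. Assuming the driver's usual IXGBE_MAX_RSC_INT_RATE of 162760 ints/sec, the cutoff lands at 6 usecs (integer division); a quick check of the condition:

#include <stdbool.h>
#include <stdio.h>

#define IXGBE_MAX_RSC_INT_RATE 162760	/* assumed, ints/sec */

static bool rsc_must_be_disabled(unsigned int rx_usecs)
{
	/* rx_usecs == 1 means dynamic ITR and is always RSC-safe */
	return rx_usecs != 1 &&
	       rx_usecs <= 1000000 / IXGBE_MAX_RSC_INT_RATE;
}

int main(void)
{
	for (unsigned int us = 0; us <= 8; us++)
		printf("rx-usecs=%u -> RSC %s\n", us,
		       rsc_must_be_disabled(us) ? "off" : "allowed");
	return 0;
}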
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.c b/drivers/net/ixgbe/ixgbe_fcoe.c
index 05efa6a8ce8e..6342d4859790 100644
--- a/drivers/net/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ixgbe/ixgbe_fcoe.c
@@ -68,7 +68,7 @@ static inline bool ixgbe_rx_is_fcoe(union ixgbe_adv_rx_desc *rx_desc)
static inline void ixgbe_fcoe_clear_ddp(struct ixgbe_fcoe_ddp *ddp)
{
ddp->len = 0;
- ddp->err = 0;
+ ddp->err = 1;
ddp->udl = NULL;
ddp->udp = 0UL;
ddp->sgl = NULL;
@@ -92,6 +92,7 @@ int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid)
struct ixgbe_fcoe *fcoe;
struct ixgbe_adapter *adapter;
struct ixgbe_fcoe_ddp *ddp;
+ u32 fcbuff;
if (!netdev)
goto out_ddp_put;
@@ -115,7 +116,14 @@ int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid)
IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCBUFF, 0);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCDMARW,
(xid | IXGBE_FCDMARW_WE));
+
+ /* guaranteed to be invalidated after 100us */
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCDMARW,
+ (xid | IXGBE_FCDMARW_RE));
+ fcbuff = IXGBE_READ_REG(&adapter->hw, IXGBE_FCBUFF);
spin_unlock_bh(&fcoe->lock);
+ if (fcbuff & IXGBE_FCBUFF_VALID)
+ udelay(100);
}
if (ddp->sgl)
pci_unmap_sg(adapter->pdev, ddp->sgl, ddp->sgc,
@@ -168,6 +176,11 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
return 0;
}
+ /* no DDP if we are already down or resetting */
+ if (test_bit(__IXGBE_DOWN, &adapter->state) ||
+ test_bit(__IXGBE_RESETTING, &adapter->state))
+ return 0;
+
fcoe = &adapter->fcoe;
if (!fcoe->pool) {
e_warn(drv, "xid=0x%x no ddp pool for fcoe\n", xid);
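
The new put path forces the DDP context invalid and only burns the 100 us delay when the FCBUFF read-back still shows the buffer valid, instead of delaying unconditionally. A user-space model of that poll-then-delay ordering (the register accessor and the VALID bit position are stand-ins):

#include <stdio.h>

#define IXGBE_FCBUFF_VALID (1u << 0)	/* assumed bit position */

static unsigned int fake_fcbuff = IXGBE_FCBUFF_VALID;

static unsigned int read_fcbuff(void) { return fake_fcbuff; }
static void udelay(unsigned int us) { printf("waited %u us\n", us); }

int main(void)
{
	/* the WE then RE writes to IXGBE_FCDMARW would precede this read */
	if (read_fcbuff() & IXGBE_FCBUFF_VALID)
		udelay(100);	/* hardware guarantees invalidation by then */
	return 0;
}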
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 2bd3eb4ee5a1..025419567440 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -52,13 +52,14 @@ char ixgbe_driver_name[] = "ixgbe";
static const char ixgbe_driver_string[] =
"Intel(R) 10 Gigabit PCI Express Network Driver";
-#define DRV_VERSION "2.0.84-k2"
+#define DRV_VERSION "3.0.12-k2"
const char ixgbe_driver_version[] = DRV_VERSION;
static char ixgbe_copyright[] = "Copyright (c) 1999-2010 Intel Corporation.";
static const struct ixgbe_info *ixgbe_info_tbl[] = {
[board_82598] = &ixgbe_82598_info,
[board_82599] = &ixgbe_82599_info,
+ [board_X540] = &ixgbe_X540_info,
};
/* ixgbe_pci_tbl - PCI Device ID Table
@@ -112,6 +113,8 @@ static DEFINE_PCI_DEVICE_TABLE(ixgbe_pci_tbl) = {
board_82599 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE),
board_82599 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T),
+ board_82599 },
/* required last entry */
{0, }
@@ -560,6 +563,7 @@ static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction,
IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
break;
case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
if (direction == -1) {
/* other causes */
msix_vector |= IXGBE_IVAR_ALLOC_VAL;
@@ -589,29 +593,34 @@ static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
{
u32 mask;
- if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82598EB:
mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
- } else {
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
mask = (qmask & 0xFFFFFFFF);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
mask = (qmask >> 32);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
+ break;
+ default:
+ break;
}
}
-void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
- struct ixgbe_tx_buffer
- *tx_buffer_info)
+void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *tx_ring,
+ struct ixgbe_tx_buffer *tx_buffer_info)
{
if (tx_buffer_info->dma) {
if (tx_buffer_info->mapped_as_page)
- dma_unmap_page(&adapter->pdev->dev,
+ dma_unmap_page(tx_ring->dev,
tx_buffer_info->dma,
tx_buffer_info->length,
DMA_TO_DEVICE);
else
- dma_unmap_single(&adapter->pdev->dev,
+ dma_unmap_single(tx_ring->dev,
tx_buffer_info->dma,
tx_buffer_info->length,
DMA_TO_DEVICE);
@@ -626,92 +635,166 @@ void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
}
/**
- * ixgbe_tx_xon_state - check the tx ring xon state
- * @adapter: the ixgbe adapter
- * @tx_ring: the corresponding tx_ring
+ * ixgbe_dcb_txq_to_tc - convert a reg index to a traffic class
+ * @adapter: driver private struct
+ * @index: reg idx of queue to query (0-127)
*
- * If not in DCB mode, checks TFCS.TXOFF, otherwise, find out the
- * corresponding TC of this tx_ring when checking TFCS.
+ * Helper function to determine the traffic class index for a particular
+ * register index.
*
- * Returns : true if in xon state (currently not paused)
+ * Returns : a TC index in the range 0-7, or 0-3
*/
-static inline bool ixgbe_tx_xon_state(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *tx_ring)
+u8 ixgbe_dcb_txq_to_tc(struct ixgbe_adapter *adapter, u8 reg_idx)
{
- u32 txoff = IXGBE_TFCS_TXOFF;
+ int tc = -1;
+ int dcb_i = adapter->ring_feature[RING_F_DCB].indices;
-#ifdef CONFIG_IXGBE_DCB
- if (adapter->dcb_cfg.pfc_mode_enable) {
- int tc;
- int reg_idx = tx_ring->reg_idx;
- int dcb_i = adapter->ring_feature[RING_F_DCB].indices;
+ /* if DCB is not enabled the queues have no TC */
+ if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
+ return tc;
+
+ /* check valid range */
+ if (reg_idx >= adapter->hw.mac.max_tx_queues)
+ return tc;
- switch (adapter->hw.mac.type) {
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82598EB:
+ tc = reg_idx >> 2;
+ break;
+ default:
+ if (dcb_i != 4 && dcb_i != 8)
+ break;
+
+ /* if VMDq is enabled the lowest order bits determine TC */
+ if (adapter->flags & (IXGBE_FLAG_SRIOV_ENABLED |
+ IXGBE_FLAG_VMDQ_ENABLED)) {
+ tc = reg_idx & (dcb_i - 1);
+ break;
+ }
+
+ /*
+ * Convert the reg_idx into the correct TC. This bitmask
+ * targets the last full 32 ring traffic class and assigns
+ * it a value of 1. From there the rest of the rings are
+ * based on shifting the mask further up to include the
+ * reg_idx / 16 and then reg_idx / 8. It assumes dcb_i
+ * will only ever be 8 or 4 and that reg_idx will never
+ * be greater than 128. The code without the power of 2
+ * optimizations would be:
+ * (((reg_idx % 32) + 32) * dcb_i) >> (9 - reg_idx / 32)
+ */
+ tc = ((reg_idx & 0x1F) + 0x20) * dcb_i;
+ tc >>= 9 - (reg_idx >> 5);
+ }
+
+ return tc;
+}
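
The bit-twiddled expression above can be sanity-checked against the ring layout it replaces: for dcb_i == 8 the 82599 layout gives 32/32/16/16/8/8/8/8 rings to TC0-TC7, and for dcb_i == 4 it is 64/32/16/16. A stand-alone check of the formula over those boundaries (the mapping function is a verbatim copy, the driver wrapper around it is not):

#include <stdio.h>

/* stand-alone copy of the closed-form mapping in ixgbe_dcb_txq_to_tc() */
static int txq_to_tc(int reg_idx, int dcb_i)
{
	int tc = ((reg_idx & 0x1F) + 0x20) * dcb_i;
	return tc >> (9 - (reg_idx >> 5));
}

int main(void)
{
	/* expected 8-TC layout: 32/32/16/16/8/8/8/8 rings per TC */
	int edges8[] = { 0, 31, 32, 63, 64, 79, 80, 95, 96, 104, 112, 120, 127 };
	for (unsigned i = 0; i < sizeof(edges8) / sizeof(*edges8); i++)
		printf("dcb_i=8 reg %3d -> TC %d\n",
		       edges8[i], txq_to_tc(edges8[i], 8));

	/* expected 4-TC layout: 64/32/16/16 rings per TC */
	int edges4[] = { 0, 63, 64, 95, 96, 111, 112, 127 };
	for (unsigned i = 0; i < sizeof(edges4) / sizeof(*edges4); i++)
		printf("dcb_i=4 reg %3d -> TC %d\n",
		       edges4[i], txq_to_tc(edges4[i], 4));
	return 0;
}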
+
+static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct ixgbe_hw_stats *hwstats = &adapter->stats;
+ u32 data = 0;
+ u32 xoff[8] = {0};
+ int i;
+
+ if ((hw->fc.current_mode == ixgbe_fc_full) ||
+ (hw->fc.current_mode == ixgbe_fc_rx_pause)) {
+ switch (hw->mac.type) {
case ixgbe_mac_82598EB:
- tc = reg_idx >> 2;
- txoff = IXGBE_TFCS_TXOFF0;
+ data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
break;
- case ixgbe_mac_82599EB:
- tc = 0;
- txoff = IXGBE_TFCS_TXOFF;
- if (dcb_i == 8) {
- /* TC0, TC1 */
- tc = reg_idx >> 5;
- if (tc == 2) /* TC2, TC3 */
- tc += (reg_idx - 64) >> 4;
- else if (tc == 3) /* TC4, TC5, TC6, TC7 */
- tc += 1 + ((reg_idx - 96) >> 3);
- } else if (dcb_i == 4) {
- /* TC0, TC1 */
- tc = reg_idx >> 6;
- if (tc == 1) {
- tc += (reg_idx - 64) >> 5;
- if (tc == 2) /* TC2, TC3 */
- tc += (reg_idx - 96) >> 4;
- }
- }
+ default:
+ data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
+ }
+ hwstats->lxoffrxc += data;
+
+ /* refill credits (no tx hang) if we received xoff */
+ if (!data)
+ return;
+
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ clear_bit(__IXGBE_HANG_CHECK_ARMED,
+ &adapter->tx_ring[i]->state);
+ return;
+ } else if (!(adapter->dcb_cfg.pfc_mode_enable))
+ return;
+
+ /* update stats for each tc, only valid with PFC enabled */
+ for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) {
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ xoff[i] = IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
break;
default:
- tc = 0;
+ xoff[i] = IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
}
- txoff <<= tc;
+ hwstats->pxoffrxc[i] += xoff[i];
+ }
+
+ /* disarm tx queues that have received xoff frames */
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
+ u32 tc = ixgbe_dcb_txq_to_tc(adapter, tx_ring->reg_idx);
+
+ if (xoff[tc])
+ clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
}
-#endif
- return IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & txoff;
}
-static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *tx_ring,
- unsigned int eop)
+static u64 ixgbe_get_tx_completed(struct ixgbe_ring *ring)
{
+ return ring->tx_stats.completed;
+}
+
+static u64 ixgbe_get_tx_pending(struct ixgbe_ring *ring)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(ring->netdev);
struct ixgbe_hw *hw = &adapter->hw;
- /* Detect a transmit hang in hardware, this serializes the
- * check with the clearing of time_stamp and movement of eop */
- adapter->detect_tx_hung = false;
- if (tx_ring->tx_buffer_info[eop].time_stamp &&
- time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ) &&
- ixgbe_tx_xon_state(adapter, tx_ring)) {
- /* detected Tx unit hang */
- union ixgbe_adv_tx_desc *tx_desc;
- tx_desc = IXGBE_TX_DESC_ADV(tx_ring, eop);
- e_err(drv, "Detected Tx Unit Hang\n"
- " Tx Queue <%d>\n"
- " TDH, TDT <%x>, <%x>\n"
- " next_to_use <%x>\n"
- " next_to_clean <%x>\n"
- "tx_buffer_info[next_to_clean]\n"
- " time_stamp <%lx>\n"
- " jiffies <%lx>\n",
- tx_ring->queue_index,
- IXGBE_READ_REG(hw, tx_ring->head),
- IXGBE_READ_REG(hw, tx_ring->tail),
- tx_ring->next_to_use, eop,
- tx_ring->tx_buffer_info[eop].time_stamp, jiffies);
- return true;
+ u32 head = IXGBE_READ_REG(hw, IXGBE_TDH(ring->reg_idx));
+ u32 tail = IXGBE_READ_REG(hw, IXGBE_TDT(ring->reg_idx));
+
+ if (head != tail)
+ return (head < tail) ?
+ tail - head : (tail + ring->count - head);
+
+ return 0;
+}
+
+static inline bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring)
+{
+ u32 tx_done = ixgbe_get_tx_completed(tx_ring);
+ u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
+ u32 tx_pending = ixgbe_get_tx_pending(tx_ring);
+ bool ret = false;
+
+ clear_check_for_tx_hang(tx_ring);
+
+ /*
+ * Check for a hung queue, but be thorough. This verifies
+ * that a transmit has been completed since the previous
+ * check AND there is at least one packet pending. The
+ * ARMED bit is set to indicate a potential hang. The
+ * bit is cleared if a pause frame is received to remove
+ * false hang detection due to PFC or 802.3x frames. By
+ * requiring this to fail twice we avoid races with
+ * pfc clearing the ARMED bit and conditions where we
+ * run the check_tx_hang logic with a transmit completion
+ * pending but without time to complete it yet.
+ */
+ if ((tx_done_old == tx_done) && tx_pending) {
+ /* make sure it is true for two checks in a row */
+ ret = test_and_set_bit(__IXGBE_HANG_CHECK_ARMED,
+ &tx_ring->state);
+ } else {
+ /* update completed stats and continue */
+ tx_ring->tx_stats.tx_done_old = tx_done;
+ /* reset the countdown */
+ clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
}
- return false;
+ return ret;
}
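
The comment block above describes a two-strike detector: the first stalled poll arms the bit, the second reports a hang, and any progress (or a received pause frame) disarms it. A minimal model of that state machine, with 'armed' standing in for the __IXGBE_HANG_CHECK_ARMED ring-state bit:

#include <stdio.h>

struct hang_check {
	unsigned long long done_old;	/* completions at the last poll */
	int armed;			/* first strike already seen? */
};

/* Returns 1 only when completions stalled across two consecutive
 * polls while work was still pending, mirroring the
 * test_and_set_bit()/clear_bit() dance above.
 */
static int check_hang(struct hang_check *hc,
		      unsigned long long done, unsigned long long pending)
{
	if (done == hc->done_old && pending) {
		int was_armed = hc->armed;

		hc->armed = 1;		/* arm on the first strike */
		return was_armed;	/* report on the second */
	}
	hc->done_old = done;		/* progress was made */
	hc->armed = 0;			/* disarm the countdown */
	return 0;
}

int main(void)
{
	struct hang_check hc = { 5, 0 };
	int first = check_hang(&hc, 5, 3);	/* arms, returns 0 */
	int second = check_hang(&hc, 5, 3);	/* still stalled, returns 1 */

	printf("%d %d\n", first, second);
	return 0;
}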
#define IXGBE_MAX_TXD_PWR 14
@@ -734,11 +817,10 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
struct ixgbe_ring *tx_ring)
{
struct ixgbe_adapter *adapter = q_vector->adapter;
- struct net_device *netdev = adapter->netdev;
union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
struct ixgbe_tx_buffer *tx_buffer_info;
- unsigned int i, eop, count = 0;
unsigned int total_bytes = 0, total_packets = 0;
+ u16 i, eop, count = 0;
i = tx_ring->next_to_clean;
eop = tx_ring->tx_buffer_info[i].next_to_watch;
@@ -749,147 +831,182 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
bool cleaned = false;
rmb(); /* read buffer_info after eop_desc */
for ( ; !cleaned; count++) {
- struct sk_buff *skb;
tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i);
tx_buffer_info = &tx_ring->tx_buffer_info[i];
- cleaned = (i == eop);
- skb = tx_buffer_info->skb;
-
- if (cleaned && skb) {
- unsigned int segs, bytecount;
- unsigned int hlen = skb_headlen(skb);
-
- /* gso_segs is currently only valid for tcp */
- segs = skb_shinfo(skb)->gso_segs ?: 1;
-#ifdef IXGBE_FCOE
- /* adjust for FCoE Sequence Offload */
- if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
- && (skb->protocol == htons(ETH_P_FCOE)) &&
- skb_is_gso(skb)) {
- hlen = skb_transport_offset(skb) +
- sizeof(struct fc_frame_header) +
- sizeof(struct fcoe_crc_eof);
- segs = DIV_ROUND_UP(skb->len - hlen,
- skb_shinfo(skb)->gso_size);
- }
-#endif /* IXGBE_FCOE */
- /* multiply data chunks by size of headers */
- bytecount = ((segs - 1) * hlen) + skb->len;
- total_packets += segs;
- total_bytes += bytecount;
- }
-
- ixgbe_unmap_and_free_tx_resource(adapter,
- tx_buffer_info);
tx_desc->wb.status = 0;
+ cleaned = (i == eop);
i++;
if (i == tx_ring->count)
i = 0;
+
+ if (cleaned && tx_buffer_info->skb) {
+ total_bytes += tx_buffer_info->bytecount;
+ total_packets += tx_buffer_info->gso_segs;
+ }
+
+ ixgbe_unmap_and_free_tx_resource(tx_ring,
+ tx_buffer_info);
}
+ tx_ring->tx_stats.completed++;
eop = tx_ring->tx_buffer_info[i].next_to_watch;
eop_desc = IXGBE_TX_DESC_ADV(tx_ring, eop);
}
tx_ring->next_to_clean = i;
+ tx_ring->total_bytes += total_bytes;
+ tx_ring->total_packets += total_packets;
+ u64_stats_update_begin(&tx_ring->syncp);
+ tx_ring->stats.packets += total_packets;
+ tx_ring->stats.bytes += total_bytes;
+ u64_stats_update_end(&tx_ring->syncp);
+
+ if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) {
+ /* log the hang state before stopping the queue */
+ struct ixgbe_hw *hw = &adapter->hw;
+ tx_desc = IXGBE_TX_DESC_ADV(tx_ring, eop);
+ e_err(drv, "Detected Tx Unit Hang\n"
+ " Tx Queue <%d>\n"
+ " TDH, TDT <%x>, <%x>\n"
+ " next_to_use <%x>\n"
+ " next_to_clean <%x>\n"
+ "tx_buffer_info[next_to_clean]\n"
+ " time_stamp <%lx>\n"
+ " jiffies <%lx>\n",
+ tx_ring->queue_index,
+ IXGBE_READ_REG(hw, IXGBE_TDH(tx_ring->reg_idx)),
+ IXGBE_READ_REG(hw, IXGBE_TDT(tx_ring->reg_idx)),
+ tx_ring->next_to_use, eop,
+ tx_ring->tx_buffer_info[eop].time_stamp, jiffies);
+
+ netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
+
+ e_info(probe,
+ "tx hang %d detected on queue %d, resetting adapter\n",
+ adapter->tx_timeout_count + 1, tx_ring->queue_index);
+
+ /* schedule immediate reset if we believe we hung */
+ ixgbe_tx_timeout(adapter->netdev);
+
+ /* the adapter is about to reset, no point in enabling stuff */
+ return true;
+ }
#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
- if (unlikely(count && netif_carrier_ok(netdev) &&
+ if (unlikely(count && netif_carrier_ok(tx_ring->netdev) &&
(IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
/* Make sure that anybody stopping the queue after this
* sees the new next_to_clean.
*/
smp_mb();
- if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
+ if (__netif_subqueue_stopped(tx_ring->netdev, tx_ring->queue_index) &&
!test_bit(__IXGBE_DOWN, &adapter->state)) {
- netif_wake_subqueue(netdev, tx_ring->queue_index);
- ++tx_ring->restart_queue;
- }
- }
-
- if (adapter->detect_tx_hung) {
- if (ixgbe_check_tx_hang(adapter, tx_ring, i)) {
- /* schedule immediate reset if we believe we hung */
- e_info(probe, "tx hang %d detected, resetting "
- "adapter\n", adapter->tx_timeout_count + 1);
- ixgbe_tx_timeout(adapter->netdev);
+ netif_wake_subqueue(tx_ring->netdev, tx_ring->queue_index);
+ ++tx_ring->tx_stats.restart_queue;
}
}
- /* re-arm the interrupt */
- if (count >= tx_ring->work_limit)
- ixgbe_irq_rearm_queues(adapter, ((u64)1 << q_vector->v_idx));
-
- tx_ring->total_bytes += total_bytes;
- tx_ring->total_packets += total_packets;
- u64_stats_update_begin(&tx_ring->syncp);
- tx_ring->stats.packets += total_packets;
- tx_ring->stats.bytes += total_bytes;
- u64_stats_update_end(&tx_ring->syncp);
return count < tx_ring->work_limit;
}
#ifdef CONFIG_IXGBE_DCA
static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *rx_ring)
+ struct ixgbe_ring *rx_ring,
+ int cpu)
{
+ struct ixgbe_hw *hw = &adapter->hw;
u32 rxctrl;
- int cpu = get_cpu();
- int q = rx_ring->reg_idx;
-
- if (rx_ring->cpu != cpu) {
- rxctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q));
- if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
- rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK;
- rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
- } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
- rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK_82599;
- rxctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
- IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599);
- }
- rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN;
- rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN;
- rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_RRO_EN);
- rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN |
- IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q), rxctrl);
- rx_ring->cpu = cpu;
+ u8 reg_idx = rx_ring->reg_idx;
+
+ rxctrl = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(reg_idx));
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK;
+ rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK_82599;
+ rxctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
+ IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599);
+ break;
+ default:
+ break;
}
- put_cpu();
+ rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN;
+ rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN;
+ rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_RRO_EN);
+ rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN |
+ IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
+ IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(reg_idx), rxctrl);
}
static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *tx_ring)
+ struct ixgbe_ring *tx_ring,
+ int cpu)
{
+ struct ixgbe_hw *hw = &adapter->hw;
u32 txctrl;
+ u8 reg_idx = tx_ring->reg_idx;
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(reg_idx));
+ txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK;
+ txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
+ txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
+ txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
+ IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(reg_idx), txctrl);
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(reg_idx));
+ txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK_82599;
+ txctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
+ IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599);
+ txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
+ txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
+ IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(reg_idx), txctrl);
+ break;
+ default:
+ break;
+ }
+}
+
+static void ixgbe_update_dca(struct ixgbe_q_vector *q_vector)
+{
+ struct ixgbe_adapter *adapter = q_vector->adapter;
int cpu = get_cpu();
- int q = tx_ring->reg_idx;
- struct ixgbe_hw *hw = &adapter->hw;
+ long r_idx;
+ int i;
- if (tx_ring->cpu != cpu) {
- if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
- txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(q));
- txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK;
- txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
- txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
- IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(q), txctrl);
- } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
- txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(q));
- txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK_82599;
- txctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
- IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599);
- txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
- IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(q), txctrl);
- }
- tx_ring->cpu = cpu;
+ if (q_vector->cpu == cpu)
+ goto out_no_update;
+
+ r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
+ for (i = 0; i < q_vector->txr_count; i++) {
+ ixgbe_update_tx_dca(adapter, adapter->tx_ring[r_idx], cpu);
+ r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
+ r_idx + 1);
+ }
+
+ r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
+ for (i = 0; i < q_vector->rxr_count; i++) {
+ ixgbe_update_rx_dca(adapter, adapter->rx_ring[r_idx], cpu);
+ r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
+ r_idx + 1);
}
+
+ q_vector->cpu = cpu;
+out_no_update:
put_cpu();
}
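
ixgbe_update_dca() visits every ring whose bit is set in the vector's txr_idx/rxr_idx bitmaps via find_first_bit()/find_next_bit(). A single-word userspace stand-in for that set-bit walk (names hypothetical):

#include <stdio.h>

/* Visit every index whose bit is set in 'bitmap', the way the DCA
 * update loops over q_vector->txr_idx / rxr_idx (one word only here).
 */
static void for_each_set_bit_word(unsigned long bitmap, unsigned int nbits,
				  void (*fn)(unsigned int idx))
{
	unsigned int i;

	for (i = 0; i < nbits; i++)
		if (bitmap & (1UL << i))
			fn(i);		/* e.g. retarget DCA for ring i */
}

static void show(unsigned int idx)
{
	printf("ring %u\n", idx);
}

int main(void)
{
	for_each_set_bit_word(0x15UL, 8, show);	/* rings 0, 2 and 4 */
	return 0;
}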
static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
{
+ int num_q_vectors;
int i;
if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED))
@@ -898,22 +1015,25 @@ static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
/* always use CB2 mode, difference is masked in the CB driver */
IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2);
- for (i = 0; i < adapter->num_tx_queues; i++) {
- adapter->tx_ring[i]->cpu = -1;
- ixgbe_update_tx_dca(adapter, adapter->tx_ring[i]);
- }
- for (i = 0; i < adapter->num_rx_queues; i++) {
- adapter->rx_ring[i]->cpu = -1;
- ixgbe_update_rx_dca(adapter, adapter->rx_ring[i]);
+ if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
+ num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+ else
+ num_q_vectors = 1;
+
+ for (i = 0; i < num_q_vectors; i++) {
+ adapter->q_vector[i]->cpu = -1;
+ ixgbe_update_dca(adapter->q_vector[i]);
}
}
static int __ixgbe_notify_dca(struct device *dev, void *data)
{
- struct net_device *netdev = dev_get_drvdata(dev);
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = dev_get_drvdata(dev);
unsigned long event = *(unsigned long *)data;
+ if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED))
+ return 0;
+
switch (event) {
case DCA_PROVIDER_ADD:
/* if we're already enabled, don't do it again */
@@ -1012,8 +1132,7 @@ static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter,
skb->ip_summed = CHECKSUM_UNNECESSARY;
}
-static inline void ixgbe_release_rx_desc(struct ixgbe_hw *hw,
- struct ixgbe_ring *rx_ring, u32 val)
+static inline void ixgbe_release_rx_desc(struct ixgbe_ring *rx_ring, u32 val)
{
/*
* Force memory writes to complete before letting h/w
@@ -1022,72 +1141,81 @@ static inline void ixgbe_release_rx_desc(struct ixgbe_hw *hw,
* such as IA-64).
*/
wmb();
- IXGBE_WRITE_REG(hw, IXGBE_RDT(rx_ring->reg_idx), val);
+ writel(val, rx_ring->tail);
}
/**
* ixgbe_alloc_rx_buffers - Replace used receive buffers; packet split
- * @adapter: address of board private structure
+ * @rx_ring: ring to place buffers on
+ * @cleaned_count: number of buffers to replace
**/
-void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *rx_ring,
- int cleaned_count)
+void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
{
- struct net_device *netdev = adapter->netdev;
- struct pci_dev *pdev = adapter->pdev;
union ixgbe_adv_rx_desc *rx_desc;
struct ixgbe_rx_buffer *bi;
- unsigned int i;
- unsigned int bufsz = rx_ring->rx_buf_len;
+ struct sk_buff *skb;
+ u16 i = rx_ring->next_to_use;
- i = rx_ring->next_to_use;
- bi = &rx_ring->rx_buffer_info[i];
+ /* do nothing if no valid netdev defined */
+ if (!rx_ring->netdev)
+ return;
while (cleaned_count--) {
rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i);
+ bi = &rx_ring->rx_buffer_info[i];
+ skb = bi->skb;
- if (!bi->page_dma &&
- (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED)) {
- if (!bi->page) {
- bi->page = netdev_alloc_page(netdev);
- if (!bi->page) {
- adapter->alloc_rx_page_failed++;
- goto no_buffers;
- }
- bi->page_offset = 0;
- } else {
- /* use a half page if we're re-using */
- bi->page_offset ^= (PAGE_SIZE / 2);
- }
-
- bi->page_dma = dma_map_page(&pdev->dev, bi->page,
- bi->page_offset,
- (PAGE_SIZE / 2),
- DMA_FROM_DEVICE);
- }
-
- if (!bi->skb) {
- struct sk_buff *skb = netdev_alloc_skb_ip_align(netdev,
- bufsz);
- bi->skb = skb;
-
+ if (!skb) {
+ skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
+ rx_ring->rx_buf_len);
if (!skb) {
- adapter->alloc_rx_buff_failed++;
+ rx_ring->rx_stats.alloc_rx_buff_failed++;
goto no_buffers;
}
/* initialize queue mapping */
skb_record_rx_queue(skb, rx_ring->queue_index);
+ bi->skb = skb;
}
if (!bi->dma) {
- bi->dma = dma_map_single(&pdev->dev,
- bi->skb->data,
+ bi->dma = dma_map_single(rx_ring->dev,
+ skb->data,
rx_ring->rx_buf_len,
DMA_FROM_DEVICE);
+ if (dma_mapping_error(rx_ring->dev, bi->dma)) {
+ rx_ring->rx_stats.alloc_rx_buff_failed++;
+ bi->dma = 0;
+ goto no_buffers;
+ }
}
- /* Refresh the desc even if buffer_addrs didn't change because
- * each write-back erases this info. */
- if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
+
+ if (ring_is_ps_enabled(rx_ring)) {
+ if (!bi->page) {
+ bi->page = netdev_alloc_page(rx_ring->netdev);
+ if (!bi->page) {
+ rx_ring->rx_stats.alloc_rx_page_failed++;
+ goto no_buffers;
+ }
+ }
+
+ if (!bi->page_dma) {
+ /* use a half page if we're re-using */
+ bi->page_offset ^= PAGE_SIZE / 2;
+ bi->page_dma = dma_map_page(rx_ring->dev,
+ bi->page,
+ bi->page_offset,
+ PAGE_SIZE / 2,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(rx_ring->dev,
+ bi->page_dma)) {
+ rx_ring->rx_stats.alloc_rx_page_failed++;
+ bi->page_dma = 0;
+ goto no_buffers;
+ }
+ }
+
+ /* Refresh the desc even if buffer_addrs didn't change
+ * because each write-back erases this info. */
rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
} else {
@@ -1098,56 +1226,48 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
i++;
if (i == rx_ring->count)
i = 0;
- bi = &rx_ring->rx_buffer_info[i];
}
no_buffers:
if (rx_ring->next_to_use != i) {
rx_ring->next_to_use = i;
- if (i-- == 0)
- i = (rx_ring->count - 1);
-
- ixgbe_release_rx_desc(&adapter->hw, rx_ring, i);
+ ixgbe_release_rx_desc(rx_ring, i);
}
}
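
The refill path now checks each mapping with dma_mapping_error() and zeroes the stored handle on failure, so the next refill pass retries cleanly instead of handing a bad address to hardware. A kernel-style sketch of the pattern; struct rx_buf is a hypothetical stand-in for the driver's buffer bookkeeping:

#include <linux/dma-mapping.h>
#include <linux/errno.h>

struct rx_buf {
	dma_addr_t dma;		/* 0 means "not mapped yet" */
};

/* Map one receive buffer; on failure, zero the handle so a later
 * refill pass can retry this slot instead of using a bad address.
 */
static int map_rx_buffer(struct device *dev, struct rx_buf *bi,
			 void *data, size_t len)
{
	bi->dma = dma_map_single(dev, data, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, bi->dma)) {
		bi->dma = 0;	/* leave the slot retryable */
		return -ENOMEM;
	}
	return 0;
}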
-static inline u16 ixgbe_get_hdr_info(union ixgbe_adv_rx_desc *rx_desc)
-{
- return rx_desc->wb.lower.lo_dword.hs_rss.hdr_info;
-}
-
-static inline u16 ixgbe_get_pkt_info(union ixgbe_adv_rx_desc *rx_desc)
+static inline u16 ixgbe_get_hlen(union ixgbe_adv_rx_desc *rx_desc)
{
- return rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
-}
-
-static inline u32 ixgbe_get_rsc_count(union ixgbe_adv_rx_desc *rx_desc)
-{
- return (le32_to_cpu(rx_desc->wb.lower.lo_dword.data) &
- IXGBE_RXDADV_RSCCNT_MASK) >>
- IXGBE_RXDADV_RSCCNT_SHIFT;
+ /* HW will not DMA in data larger than the given buffer, even if it
+ * parses the (NFS, of course) header to be larger. In that case, it
+ * fills the header buffer and spills the rest into the page.
+ */
+ u16 hdr_info = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.hdr_info);
+ u16 hlen = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
+ IXGBE_RXDADV_HDRBUFLEN_SHIFT;
+ if (hlen > IXGBE_RX_HDR_SIZE)
+ hlen = IXGBE_RX_HDR_SIZE;
+ return hlen;
}
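
ixgbe_get_hlen() is a mask-and-shift field extraction followed by a clamp. A worked plain-C example; the mask and shift below follow the IXGBE_RXDADV_HDRBUFLEN_* definitions as assumed here, and should be treated as illustrative:

#include <stdint.h>
#include <stdio.h>

#define HDRBUFLEN_MASK	0x7FE0	/* assumed: bits 5..14 of hdr_info */
#define HDRBUFLEN_SHIFT	5
#define RX_HDR_SIZE	512	/* stand-in for IXGBE_RX_HDR_SIZE */

int main(void)
{
	uint16_t hdr_info = 0x0C40;	/* example descriptor word */
	uint16_t hlen = (hdr_info & HDRBUFLEN_MASK) >> HDRBUFLEN_SHIFT;

	if (hlen > RX_HDR_SIZE)
		hlen = RX_HDR_SIZE;	/* clamp oversized claims */
	printf("header length: %u bytes\n", hlen);	/* 98 */
	return 0;
}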
/**
* ixgbe_transform_rsc_queue - change rsc queue into a full packet
* @skb: pointer to the last skb in the rsc queue
- * @count: pointer to number of packets coalesced in this context
*
* This function changes a queue full of hw rsc buffers into a completed
* packet. It uses the ->prev pointers to find the first packet and then
* turns it into the frag list owner.
**/
-static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb,
- u64 *count)
+static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb)
{
unsigned int frag_list_size = 0;
+ unsigned int skb_cnt = 1;
while (skb->prev) {
struct sk_buff *prev = skb->prev;
frag_list_size += skb->len;
skb->prev = NULL;
skb = prev;
- *count += 1;
+ skb_cnt++;
}
skb_shinfo(skb)->frag_list = skb->next;
@@ -1155,68 +1275,59 @@ static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb,
skb->len += frag_list_size;
skb->data_len += frag_list_size;
skb->truesize += frag_list_size;
+ IXGBE_RSC_CB(skb)->skb_cnt = skb_cnt;
+
return skb;
}
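
The merge walks the ->prev links back to the first buffer, detaching and counting segments as it goes, so the head ends up owning the chain. A plain-C stand-in for the sk_buff bookkeeping (struct buf is hypothetical):

#include <stddef.h>
#include <stdio.h>

struct buf {
	struct buf *prev;	/* filled in as non-EOP frames arrived */
	unsigned int len;
};

/* Walk ->prev back to the first buffer, detaching and counting as we
 * go; the first buffer becomes the owner of everything behind it.
 */
static struct buf *merge_chain(struct buf *last, unsigned int *segs,
			       unsigned int *tail_bytes)
{
	unsigned int n = 1, bytes = 0;

	while (last->prev) {
		struct buf *prev = last->prev;

		bytes += last->len;	/* everything behind the head */
		last->prev = NULL;	/* detach while walking back */
		last = prev;
		n++;
	}
	*segs = n;
	*tail_bytes = bytes;
	return last;
}

int main(void)
{
	struct buf a = { NULL, 100 }, b = { &a, 200 }, c = { &b, 300 };
	unsigned int segs, tail_bytes;
	struct buf *head = merge_chain(&c, &segs, &tail_bytes);

	printf("head len %u, %u segs, %u trailing bytes\n",
	       head->len, segs, tail_bytes);	/* 100, 3, 500 */
	return 0;
}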
-struct ixgbe_rsc_cb {
- dma_addr_t dma;
- bool delay_unmap;
-};
-
-#define IXGBE_RSC_CB(skb) ((struct ixgbe_rsc_cb *)(skb)->cb)
+static inline bool ixgbe_get_rsc_state(union ixgbe_adv_rx_desc *rx_desc)
+{
+ return !!(le32_to_cpu(rx_desc->wb.lower.lo_dword.data) &
+ IXGBE_RXDADV_RSCCNT_MASK);
+}
-static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
+static void ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
struct ixgbe_ring *rx_ring,
int *work_done, int work_to_do)
{
struct ixgbe_adapter *adapter = q_vector->adapter;
- struct pci_dev *pdev = adapter->pdev;
union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer;
struct sk_buff *skb;
- unsigned int i, rsc_count = 0;
- u32 len, staterr;
- u16 hdr_info;
- bool cleaned = false;
- int cleaned_count = 0;
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+ const int current_node = numa_node_id();
#ifdef IXGBE_FCOE
int ddp_bytes = 0;
#endif /* IXGBE_FCOE */
+ u32 staterr;
+ u16 i;
+ u16 cleaned_count = 0;
+ bool pkt_is_rsc = false;
i = rx_ring->next_to_clean;
rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i);
staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
- rx_buffer_info = &rx_ring->rx_buffer_info[i];
while (staterr & IXGBE_RXD_STAT_DD) {
u32 upper_len = 0;
- if (*work_done >= work_to_do)
- break;
- (*work_done)++;
rmb(); /* read descriptor and rx_buffer_info after status DD */
- if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
- hdr_info = le16_to_cpu(ixgbe_get_hdr_info(rx_desc));
- len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
- IXGBE_RXDADV_HDRBUFLEN_SHIFT;
- upper_len = le16_to_cpu(rx_desc->wb.upper.length);
- if ((len > IXGBE_RX_HDR_SIZE) ||
- (upper_len && !(hdr_info & IXGBE_RXDADV_SPH)))
- len = IXGBE_RX_HDR_SIZE;
- } else {
- len = le16_to_cpu(rx_desc->wb.upper.length);
- }
- cleaned = true;
+ rx_buffer_info = &rx_ring->rx_buffer_info[i];
+
skb = rx_buffer_info->skb;
- prefetch(skb->data);
rx_buffer_info->skb = NULL;
+ prefetch(skb->data);
+
+ if (ring_is_rsc_enabled(rx_ring))
+ pkt_is_rsc = ixgbe_get_rsc_state(rx_desc);
+ /* if this is an skb from a previous receive, DMA will be 0 */
if (rx_buffer_info->dma) {
- if ((adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) &&
- (!(staterr & IXGBE_RXD_STAT_EOP)) &&
- (!(skb->prev))) {
+ u16 hlen;
+ if (pkt_is_rsc &&
+ !(staterr & IXGBE_RXD_STAT_EOP) &&
+ !skb->prev) {
/*
* When HWRSC is enabled, delay unmapping
* of the first packet. It carries the
@@ -1227,29 +1338,42 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
IXGBE_RSC_CB(skb)->delay_unmap = true;
IXGBE_RSC_CB(skb)->dma = rx_buffer_info->dma;
} else {
- dma_unmap_single(&pdev->dev,
+ dma_unmap_single(rx_ring->dev,
rx_buffer_info->dma,
rx_ring->rx_buf_len,
DMA_FROM_DEVICE);
}
rx_buffer_info->dma = 0;
- skb_put(skb, len);
+
+ if (ring_is_ps_enabled(rx_ring)) {
+ hlen = ixgbe_get_hlen(rx_desc);
+ upper_len = le16_to_cpu(rx_desc->wb.upper.length);
+ } else {
+ hlen = le16_to_cpu(rx_desc->wb.upper.length);
+ }
+
+ skb_put(skb, hlen);
+ } else {
+ /* assume packet split since header is unmapped */
+ upper_len = le16_to_cpu(rx_desc->wb.upper.length);
}
if (upper_len) {
- dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma,
- PAGE_SIZE / 2, DMA_FROM_DEVICE);
+ dma_unmap_page(rx_ring->dev,
+ rx_buffer_info->page_dma,
+ PAGE_SIZE / 2,
+ DMA_FROM_DEVICE);
rx_buffer_info->page_dma = 0;
skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
rx_buffer_info->page,
rx_buffer_info->page_offset,
upper_len);
- if ((rx_ring->rx_buf_len > (PAGE_SIZE / 2)) ||
- (page_count(rx_buffer_info->page) != 1))
- rx_buffer_info->page = NULL;
- else
+ if ((page_count(rx_buffer_info->page) == 1) &&
+ (page_to_nid(rx_buffer_info->page) == current_node))
get_page(rx_buffer_info->page);
+ else
+ rx_buffer_info->page = NULL;
skb->len += upper_len;
skb->data_len += upper_len;
@@ -1264,10 +1388,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
prefetch(next_rxd);
cleaned_count++;
- if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)
- rsc_count = ixgbe_get_rsc_count(rx_desc);
-
- if (rsc_count) {
+ if (pkt_is_rsc) {
u32 nextp = (staterr & IXGBE_RXDADV_NEXTP_MASK) >>
IXGBE_RXDADV_NEXTP_SHIFT;
next_buffer = &rx_ring->rx_buffer_info[nextp];
@@ -1275,32 +1396,8 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
next_buffer = &rx_ring->rx_buffer_info[i];
}
- if (staterr & IXGBE_RXD_STAT_EOP) {
- if (skb->prev)
- skb = ixgbe_transform_rsc_queue(skb,
- &(rx_ring->rsc_count));
- if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
- if (IXGBE_RSC_CB(skb)->delay_unmap) {
- dma_unmap_single(&pdev->dev,
- IXGBE_RSC_CB(skb)->dma,
- rx_ring->rx_buf_len,
- DMA_FROM_DEVICE);
- IXGBE_RSC_CB(skb)->dma = 0;
- IXGBE_RSC_CB(skb)->delay_unmap = false;
- }
- if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED)
- rx_ring->rsc_count +=
- skb_shinfo(skb)->nr_frags;
- else
- rx_ring->rsc_count++;
- rx_ring->rsc_flush++;
- }
- u64_stats_update_begin(&rx_ring->syncp);
- rx_ring->stats.packets++;
- rx_ring->stats.bytes += skb->len;
- u64_stats_update_end(&rx_ring->syncp);
- } else {
- if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
+ if (!(staterr & IXGBE_RXD_STAT_EOP)) {
+ if (ring_is_ps_enabled(rx_ring)) {
rx_buffer_info->skb = next_buffer->skb;
rx_buffer_info->dma = next_buffer->dma;
next_buffer->skb = skb;
@@ -1309,12 +1406,45 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
skb->next = next_buffer->skb;
skb->next->prev = skb;
}
- rx_ring->non_eop_descs++;
+ rx_ring->rx_stats.non_eop_descs++;
goto next_desc;
}
+ if (skb->prev) {
+ skb = ixgbe_transform_rsc_queue(skb);
+ /* if we got here without RSC the packet is invalid */
+ if (!pkt_is_rsc) {
+ __pskb_trim(skb, 0);
+ rx_buffer_info->skb = skb;
+ goto next_desc;
+ }
+ }
+
+ if (ring_is_rsc_enabled(rx_ring)) {
+ if (IXGBE_RSC_CB(skb)->delay_unmap) {
+ dma_unmap_single(rx_ring->dev,
+ IXGBE_RSC_CB(skb)->dma,
+ rx_ring->rx_buf_len,
+ DMA_FROM_DEVICE);
+ IXGBE_RSC_CB(skb)->dma = 0;
+ IXGBE_RSC_CB(skb)->delay_unmap = false;
+ }
+ }
+ if (pkt_is_rsc) {
+ if (ring_is_ps_enabled(rx_ring))
+ rx_ring->rx_stats.rsc_count +=
+ skb_shinfo(skb)->nr_frags;
+ else
+ rx_ring->rx_stats.rsc_count +=
+ IXGBE_RSC_CB(skb)->skb_cnt;
+ rx_ring->rx_stats.rsc_flush++;
+ }
+
+ /* ERR_MASK will only have valid bits if EOP set */
if (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) {
- dev_kfree_skb_irq(skb);
+ /* trim packet back to size 0 and recycle it */
+ __pskb_trim(skb, 0);
+ rx_buffer_info->skb = skb;
goto next_desc;
}
@@ -1324,7 +1454,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
total_rx_bytes += skb->len;
total_rx_packets++;
- skb->protocol = eth_type_trans(skb, adapter->netdev);
+ skb->protocol = eth_type_trans(skb, rx_ring->netdev);
#ifdef IXGBE_FCOE
/* if ddp, not passing to ULD unless for FCP_RSP or error */
if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
@@ -1338,16 +1468,18 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
next_desc:
rx_desc->wb.upper.status_error = 0;
+ (*work_done)++;
+ if (*work_done >= work_to_do)
+ break;
+
/* return some buffers to hardware, one at a time is too slow */
if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
- ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
+ ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
cleaned_count = 0;
}
/* use prefetched values */
rx_desc = next_rxd;
- rx_buffer_info = &rx_ring->rx_buffer_info[i];
-
staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
}
@@ -1355,14 +1487,14 @@ next_desc:
cleaned_count = IXGBE_DESC_UNUSED(rx_ring);
if (cleaned_count)
- ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
+ ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
#ifdef IXGBE_FCOE
/* include DDPed FCoE data */
if (ddp_bytes > 0) {
unsigned int mss;
- mss = adapter->netdev->mtu - sizeof(struct fcoe_hdr) -
+ mss = rx_ring->netdev->mtu - sizeof(struct fcoe_hdr) -
sizeof(struct fc_frame_header) -
sizeof(struct fcoe_crc_eof);
if (mss > 512)
@@ -1374,8 +1506,10 @@ next_desc:
rx_ring->total_packets += total_rx_packets;
rx_ring->total_bytes += total_rx_bytes;
-
- return cleaned;
+ u64_stats_update_begin(&rx_ring->syncp);
+ rx_ring->stats.packets += total_rx_packets;
+ rx_ring->stats.bytes += total_rx_bytes;
+ u64_stats_update_end(&rx_ring->syncp);
}
static int ixgbe_clean_rxonly(struct napi_struct *, int);
@@ -1389,7 +1523,7 @@ static int ixgbe_clean_rxonly(struct napi_struct *, int);
static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
{
struct ixgbe_q_vector *q_vector;
- int i, j, q_vectors, v_idx, r_idx;
+ int i, q_vectors, v_idx, r_idx;
u32 mask;
q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
@@ -1405,8 +1539,8 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
adapter->num_rx_queues);
for (i = 0; i < q_vector->rxr_count; i++) {
- j = adapter->rx_ring[r_idx]->reg_idx;
- ixgbe_set_ivar(adapter, 0, j, v_idx);
+ u8 reg_idx = adapter->rx_ring[r_idx]->reg_idx;
+ ixgbe_set_ivar(adapter, 0, reg_idx, v_idx);
r_idx = find_next_bit(q_vector->rxr_idx,
adapter->num_rx_queues,
r_idx + 1);
@@ -1415,8 +1549,8 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
adapter->num_tx_queues);
for (i = 0; i < q_vector->txr_count; i++) {
- j = adapter->tx_ring[r_idx]->reg_idx;
- ixgbe_set_ivar(adapter, 1, j, v_idx);
+ u8 reg_idx = adapter->tx_ring[r_idx]->reg_idx;
+ ixgbe_set_ivar(adapter, 1, reg_idx, v_idx);
r_idx = find_next_bit(q_vector->txr_idx,
adapter->num_tx_queues,
r_idx + 1);
@@ -1447,11 +1581,19 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
}
}
- if (adapter->hw.mac.type == ixgbe_mac_82598EB)
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82598EB:
ixgbe_set_ivar(adapter, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX,
v_idx);
- else if (adapter->hw.mac.type == ixgbe_mac_82599EB)
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
ixgbe_set_ivar(adapter, -1, 1, v_idx);
+ break;
+
+ default:
+ break;
+ }
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950);
/* set up to autoclear timer, and the vectors */
@@ -1547,12 +1689,15 @@ void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
int v_idx = q_vector->v_idx;
u32 itr_reg = EITR_INTS_PER_SEC_TO_REG(q_vector->eitr);
- if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82598EB:
/* must write high and low 16 bits to reset counter */
itr_reg |= (itr_reg << 16);
- } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
/*
- * 82599 can support a value of zero, so allow it for
+ * 82599 and X540 can support a value of zero, so allow it for
* max interrupt rate, but there is an errata where it can
* not be zero with RSC
*/
@@ -1565,6 +1710,9 @@ void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
* immediate assertion of the interrupt
*/
itr_reg |= IXGBE_EITR_CNT_WDIS;
+ break;
+ default:
+ break;
}
IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg);
}
@@ -1572,14 +1720,13 @@ void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
{
struct ixgbe_adapter *adapter = q_vector->adapter;
+ int i, r_idx;
u32 new_itr;
u8 current_itr, ret_itr;
- int i, r_idx;
- struct ixgbe_ring *rx_ring, *tx_ring;
r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
for (i = 0; i < q_vector->txr_count; i++) {
- tx_ring = adapter->tx_ring[r_idx];
+ struct ixgbe_ring *tx_ring = adapter->tx_ring[r_idx];
ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
q_vector->tx_itr,
tx_ring->total_packets,
@@ -1594,7 +1741,7 @@ static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
for (i = 0; i < q_vector->rxr_count; i++) {
- rx_ring = adapter->rx_ring[r_idx];
+ struct ixgbe_ring *rx_ring = adapter->rx_ring[r_idx];
ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
q_vector->rx_itr,
rx_ring->total_packets,
@@ -1625,7 +1772,7 @@ static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
if (new_itr != q_vector->eitr) {
/* do an exponential smoothing */
- new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
+ new_itr = ((q_vector->eitr * 9) + new_itr)/10;
/* save the algorithm value here, not the smoothed one */
q_vector->eitr = new_itr;
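
Both ITR hunks replace ((eitr * 90)/100) + ((new * 10)/100) with ((eitr * 9) + new)/10: the same 90/10 exponential smoothing, but one truncating division instead of two, so less precision is thrown away. A quick integer check:

#include <stdio.h>

int main(void)
{
	unsigned int eitr = 8001, target = 957;
	unsigned int old_way = (eitr * 90) / 100 + (target * 10) / 100;
	unsigned int new_way = (eitr * 9 + target) / 10;

	printf("old %u, new %u\n", old_way, new_way);	/* 7295 vs 7296 */
	return 0;
}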
@@ -1693,17 +1840,18 @@ static void ixgbe_check_sfp_event(struct ixgbe_adapter *adapter, u32 eicr)
{
struct ixgbe_hw *hw = &adapter->hw;
+ if (eicr & IXGBE_EICR_GPI_SDP2) {
+ /* Clear the interrupt */
+ IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2);
+ if (!test_bit(__IXGBE_DOWN, &adapter->state))
+ schedule_work(&adapter->sfp_config_module_task);
+ }
+
if (eicr & IXGBE_EICR_GPI_SDP1) {
/* Clear the interrupt */
IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
- schedule_work(&adapter->multispeed_fiber_task);
- } else if (eicr & IXGBE_EICR_GPI_SDP2) {
- /* Clear the interrupt */
- IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2);
- schedule_work(&adapter->sfp_config_module_task);
- } else {
- /* Interrupt isn't for us... */
- return;
+ if (!test_bit(__IXGBE_DOWN, &adapter->state))
+ schedule_work(&adapter->multispeed_fiber_task);
}
}
@@ -1743,16 +1891,9 @@ static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
if (eicr & IXGBE_EICR_MAILBOX)
ixgbe_msg_task(adapter);
- if (hw->mac.type == ixgbe_mac_82598EB)
- ixgbe_check_fan_failure(adapter, eicr);
-
- if (hw->mac.type == ixgbe_mac_82599EB) {
- ixgbe_check_sfp_event(adapter, eicr);
- adapter->interrupt_event = eicr;
- if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
- ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC)))
- schedule_work(&adapter->check_overtemp_task);
-
+ switch (hw->mac.type) {
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
/* Handle Flow Director Full threshold interrupt */
if (eicr & IXGBE_EICR_FLOW_DIR) {
int i;
@@ -1762,12 +1903,24 @@ static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
for (i = 0; i < adapter->num_tx_queues; i++) {
struct ixgbe_ring *tx_ring =
adapter->tx_ring[i];
- if (test_and_clear_bit(__IXGBE_FDIR_INIT_DONE,
- &tx_ring->reinit_state))
+ if (test_and_clear_bit(__IXGBE_TX_FDIR_INIT_DONE,
+ &tx_ring->state))
schedule_work(&adapter->fdir_reinit_task);
}
}
+ ixgbe_check_sfp_event(adapter, eicr);
+ if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
+ ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC))) {
+ adapter->interrupt_event = eicr;
+ schedule_work(&adapter->check_overtemp_task);
+ }
+ break;
+ default:
+ break;
}
+
+ ixgbe_check_fan_failure(adapter, eicr);
+
if (!test_bit(__IXGBE_DOWN, &adapter->state))
IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
@@ -1778,15 +1931,24 @@ static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
u64 qmask)
{
u32 mask;
+ struct ixgbe_hw *hw = &adapter->hw;
- if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
- } else {
+ IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
mask = (qmask & 0xFFFFFFFF);
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(0), mask);
+ if (mask)
+ IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
mask = (qmask >> 32);
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(1), mask);
+ if (mask)
+ IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
+ break;
+ default:
+ break;
}
/* skip the flush */
}
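
On 82599/X540 the 64-bit queue mask spans two 32-bit EIMS_EX registers, and the new code skips the MMIO write when a half is zero. The split itself, standalone:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t qmask = (1ULL << 40) | (1ULL << 3);	/* queues 3 and 40 */
	uint32_t lo = (uint32_t)(qmask & 0xFFFFFFFF);	/* -> EIMS_EX(0) */
	uint32_t hi = (uint32_t)(qmask >> 32);		/* -> EIMS_EX(1) */

	printf("lo %#x hi %#x\n", (unsigned)lo, (unsigned)hi);	/* 0x8 0x100 */
	return 0;
}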
@@ -1795,15 +1957,24 @@ static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter,
u64 qmask)
{
u32 mask;
+ struct ixgbe_hw *hw = &adapter->hw;
- if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, mask);
- } else {
+ IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
mask = (qmask & 0xFFFFFFFF);
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), mask);
+ if (mask)
+ IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
mask = (qmask >> 32);
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), mask);
+ if (mask)
+ IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
+ break;
+ default:
+ break;
}
/* skip the flush */
}
@@ -1846,8 +2017,13 @@ static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
int r_idx;
int i;
+#ifdef CONFIG_IXGBE_DCA
+ if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
+ ixgbe_update_dca(q_vector);
+#endif
+
r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
- for (i = 0; i < q_vector->rxr_count; i++) {
+ for (i = 0; i < q_vector->rxr_count; i++) {
rx_ring = adapter->rx_ring[r_idx];
rx_ring->total_bytes = 0;
rx_ring->total_packets = 0;
@@ -1858,7 +2034,6 @@ static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
if (!q_vector->rxr_count)
return IRQ_HANDLED;
- /* disable interrupts on this vector only */
/* EIAM disabled interrupts (on this vector) for us */
napi_schedule(&q_vector->napi);
@@ -1917,13 +2092,14 @@ static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
int work_done = 0;
long r_idx;
- r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
- rx_ring = adapter->rx_ring[r_idx];
#ifdef CONFIG_IXGBE_DCA
if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
- ixgbe_update_rx_dca(adapter, rx_ring);
+ ixgbe_update_dca(q_vector);
#endif
+ r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
+ rx_ring = adapter->rx_ring[r_idx];
+
ixgbe_clean_rx_irq(q_vector, rx_ring, &work_done, budget);
/* If all Rx work done, exit the polling mode */
@@ -1957,13 +2133,14 @@ static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget)
long r_idx;
bool tx_clean_complete = true;
+#ifdef CONFIG_IXGBE_DCA
+ if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
+ ixgbe_update_dca(q_vector);
+#endif
+
r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
for (i = 0; i < q_vector->txr_count; i++) {
ring = adapter->tx_ring[r_idx];
-#ifdef CONFIG_IXGBE_DCA
- if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
- ixgbe_update_tx_dca(adapter, ring);
-#endif
tx_clean_complete &= ixgbe_clean_tx_irq(q_vector, ring);
r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
r_idx + 1);
@@ -1976,10 +2153,6 @@ static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget)
r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
for (i = 0; i < q_vector->rxr_count; i++) {
ring = adapter->rx_ring[r_idx];
-#ifdef CONFIG_IXGBE_DCA
- if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
- ixgbe_update_rx_dca(adapter, ring);
-#endif
ixgbe_clean_rx_irq(q_vector, ring, &work_done, budget);
r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
r_idx + 1);
@@ -2018,13 +2191,14 @@ static int ixgbe_clean_txonly(struct napi_struct *napi, int budget)
int work_done = 0;
long r_idx;
- r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
- tx_ring = adapter->tx_ring[r_idx];
#ifdef CONFIG_IXGBE_DCA
if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
- ixgbe_update_tx_dca(adapter, tx_ring);
+ ixgbe_update_dca(q_vector);
#endif
+ r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
+ tx_ring = adapter->tx_ring[r_idx];
+
if (!ixgbe_clean_tx_irq(q_vector, tx_ring))
work_done = budget;
@@ -2045,24 +2219,27 @@ static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx,
int r_idx)
{
struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];
+ struct ixgbe_ring *rx_ring = a->rx_ring[r_idx];
set_bit(r_idx, q_vector->rxr_idx);
q_vector->rxr_count++;
+ rx_ring->q_vector = q_vector;
}
static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
int t_idx)
{
struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];
+ struct ixgbe_ring *tx_ring = a->tx_ring[t_idx];
set_bit(t_idx, q_vector->txr_idx);
q_vector->txr_count++;
+ tx_ring->q_vector = q_vector;
}
/**
* ixgbe_map_rings_to_vectors - Maps descriptor rings to vectors
* @adapter: board private structure to initialize
- * @vectors: allotted vector count for descriptor rings
*
* This function maps descriptor rings to the queue-specific vectors
* we were allotted through the MSI-X enabling code. Ideally, we'd have
@@ -2070,9 +2247,9 @@ static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
* group the rings as "efficiently" as possible. You would add new
* mapping configurations in here.
**/
-static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter,
- int vectors)
+static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter)
{
+ int q_vectors;
int v_start = 0;
int rxr_idx = 0, txr_idx = 0;
int rxr_remaining = adapter->num_rx_queues;
@@ -2085,11 +2262,13 @@ static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter,
if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
goto out;
+ q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+
/*
* The ideal configuration...
* We have enough vectors to map one per queue.
*/
- if (vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
+ if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
map_vector_to_rxq(adapter, v_start, rxr_idx);
@@ -2105,23 +2284,20 @@ static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter,
* multiple queues per vector.
*/
/* Re-adjusting *qpv takes care of the remainder. */
- for (i = v_start; i < vectors; i++) {
- rqpv = DIV_ROUND_UP(rxr_remaining, vectors - i);
+ for (i = v_start; i < q_vectors; i++) {
+ rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
for (j = 0; j < rqpv; j++) {
map_vector_to_rxq(adapter, i, rxr_idx);
rxr_idx++;
rxr_remaining--;
}
- }
- for (i = v_start; i < vectors; i++) {
- tqpv = DIV_ROUND_UP(txr_remaining, vectors - i);
+ tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);
for (j = 0; j < tqpv; j++) {
map_vector_to_txq(adapter, i, txr_idx);
txr_idx++;
txr_remaining--;
}
}
-
out:
return err;
}
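
The fallback path hands out the remaining rings by recomputing DIV_ROUND_UP against the vectors still unassigned, which spreads any remainder evenly. A standalone demonstration:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	int remaining = 10, vectors = 4, i;

	for (i = 0; i < vectors; i++) {
		int share = DIV_ROUND_UP(remaining, vectors - i);

		printf("vector %d gets %d rings\n", i, share);
		remaining -= share;	/* 3, 3, 2, 2 */
	}
	return 0;
}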
@@ -2143,30 +2319,36 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
/* Decrement for Other and TCP Timer vectors */
q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
- /* Map the Tx/Rx rings to the vectors we were allotted. */
- err = ixgbe_map_rings_to_vectors(adapter, q_vectors);
+ err = ixgbe_map_rings_to_vectors(adapter);
if (err)
- goto out;
+ return err;
-#define SET_HANDLER(_v) ((!(_v)->rxr_count) ? &ixgbe_msix_clean_tx : \
- (!(_v)->txr_count) ? &ixgbe_msix_clean_rx : \
- &ixgbe_msix_clean_many)
+#define SET_HANDLER(_v) (((_v)->rxr_count && (_v)->txr_count) \
+ ? &ixgbe_msix_clean_many : \
+ (_v)->rxr_count ? &ixgbe_msix_clean_rx : \
+ (_v)->txr_count ? &ixgbe_msix_clean_tx : \
+ NULL)
for (vector = 0; vector < q_vectors; vector++) {
- handler = SET_HANDLER(adapter->q_vector[vector]);
+ struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];
+ handler = SET_HANDLER(q_vector);
if (handler == &ixgbe_msix_clean_rx) {
- sprintf(adapter->name[vector], "%s-%s-%d",
+ sprintf(q_vector->name, "%s-%s-%d",
netdev->name, "rx", ri++);
} else if (handler == &ixgbe_msix_clean_tx) {
- sprintf(adapter->name[vector], "%s-%s-%d",
+ sprintf(q_vector->name, "%s-%s-%d",
netdev->name, "tx", ti++);
- } else
- sprintf(adapter->name[vector], "%s-%s-%d",
- netdev->name, "TxRx", vector);
-
+ } else if (handler == &ixgbe_msix_clean_many) {
+ sprintf(q_vector->name, "%s-%s-%d",
+ netdev->name, "TxRx", ri++);
+ ti++;
+ } else {
+ /* skip this unused q_vector */
+ continue;
+ }
err = request_irq(adapter->msix_entries[vector].vector,
- handler, 0, adapter->name[vector],
- adapter->q_vector[vector]);
+ handler, 0, q_vector->name,
+ q_vector);
if (err) {
e_err(probe, "request_irq failed for MSIX interrupt "
"Error: %d\n", err);
@@ -2174,9 +2356,9 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
}
}
- sprintf(adapter->name[vector], "%s:lsc", netdev->name);
+ sprintf(adapter->lsc_int_name, "%s:lsc", netdev->name);
err = request_irq(adapter->msix_entries[vector].vector,
- ixgbe_msix_lsc, 0, adapter->name[vector], netdev);
+ ixgbe_msix_lsc, 0, adapter->lsc_int_name, netdev);
if (err) {
e_err(probe, "request_irq for msix_lsc failed: %d\n", err);
goto free_queue_irqs;
@@ -2192,17 +2374,16 @@ free_queue_irqs:
pci_disable_msix(adapter->pdev);
kfree(adapter->msix_entries);
adapter->msix_entries = NULL;
-out:
return err;
}
static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
{
struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
- u8 current_itr;
- u32 new_itr = q_vector->eitr;
struct ixgbe_ring *rx_ring = adapter->rx_ring[0];
struct ixgbe_ring *tx_ring = adapter->tx_ring[0];
+ u32 new_itr = q_vector->eitr;
+ u8 current_itr;
q_vector->tx_itr = ixgbe_update_itr(adapter, new_itr,
q_vector->tx_itr,
@@ -2232,9 +2413,9 @@ static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
if (new_itr != q_vector->eitr) {
/* do an exponential smoothing */
- new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
+ new_itr = ((q_vector->eitr * 9) + new_itr)/10;
- /* save the algorithm value here, not the smoothed one */
+ /* save the algorithm value here */
q_vector->eitr = new_itr;
ixgbe_write_eitr(q_vector);
@@ -2255,12 +2436,17 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
mask |= IXGBE_EIMS_GPI_SDP0;
if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
mask |= IXGBE_EIMS_GPI_SDP1;
- if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
mask |= IXGBE_EIMS_ECC;
mask |= IXGBE_EIMS_GPI_SDP1;
mask |= IXGBE_EIMS_GPI_SDP2;
if (adapter->num_vfs)
mask |= IXGBE_EIMS_MAILBOX;
+ break;
+ default:
+ break;
}
if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
@@ -2316,13 +2502,21 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
if (eicr & IXGBE_EICR_LSC)
ixgbe_check_lsc(adapter);
- if (hw->mac.type == ixgbe_mac_82599EB)
+ switch (hw->mac.type) {
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
ixgbe_check_sfp_event(adapter, eicr);
+ if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
+ ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC))) {
+ adapter->interrupt_event = eicr;
+ schedule_work(&adapter->check_overtemp_task);
+ }
+ break;
+ default:
+ break;
+ }
ixgbe_check_fan_failure(adapter, eicr);
- if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
- ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC)))
- schedule_work(&adapter->check_overtemp_task);
if (napi_schedule_prep(&(q_vector->napi))) {
adapter->tx_ring[0]->total_packets = 0;
@@ -2415,14 +2609,20 @@ static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
**/
static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
{
- if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82598EB:
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
- } else {
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
if (adapter->num_vfs > 32)
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, 0);
+ break;
+ default:
+ break;
}
IXGBE_WRITE_FLUSH(&adapter->hw);
if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
@@ -2468,7 +2668,7 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
u64 tdba = ring->dma;
int wait_loop = 10;
u32 txdctl;
- u16 reg_idx = ring->reg_idx;
+ u8 reg_idx = ring->reg_idx;
/* disable queue to avoid issues while updating state */
txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
@@ -2483,8 +2683,7 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
ring->count * sizeof(union ixgbe_adv_tx_desc));
IXGBE_WRITE_REG(hw, IXGBE_TDH(reg_idx), 0);
IXGBE_WRITE_REG(hw, IXGBE_TDT(reg_idx), 0);
- ring->head = IXGBE_TDH(reg_idx);
- ring->tail = IXGBE_TDT(reg_idx);
+ ring->tail = hw->hw_addr + IXGBE_TDT(reg_idx);
/* configure fetching thresholds */
if (adapter->rx_itr_setting == 0) {
@@ -2500,7 +2699,16 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
}
/* reinitialize flowdirector state */
- set_bit(__IXGBE_FDIR_INIT_DONE, &ring->reinit_state);
+ if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) &&
+ adapter->atr_sample_rate) {
+ ring->atr_sample_rate = adapter->atr_sample_rate;
+ ring->atr_count = 0;
+ set_bit(__IXGBE_TX_FDIR_INIT_DONE, &ring->state);
+ } else {
+ ring->atr_sample_rate = 0;
+ }
+
+ clear_bit(__IXGBE_HANG_CHECK_ARMED, &ring->state);
/* enable queue */
txdctl |= IXGBE_TXDCTL_ENABLE;
@@ -2591,16 +2799,22 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
struct ixgbe_ring *rx_ring)
{
u32 srrctl;
- int index;
- struct ixgbe_ring_feature *feature = adapter->ring_feature;
+ u8 reg_idx = rx_ring->reg_idx;
- index = rx_ring->reg_idx;
- if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
- unsigned long mask;
- mask = (unsigned long) feature[RING_F_RSS].mask;
- index = index & mask;
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82598EB: {
+ struct ixgbe_ring_feature *feature = adapter->ring_feature;
+ const int mask = feature[RING_F_RSS].mask;
+ reg_idx = reg_idx & mask;
}
- srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(index));
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ default:
+ break;
+ }
+
+ srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(reg_idx));
srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
@@ -2610,7 +2824,7 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
srrctl |= (IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
IXGBE_SRRCTL_BSIZEHDR_MASK;
- if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
+ if (ring_is_ps_enabled(rx_ring)) {
#if (PAGE_SIZE / 2) > IXGBE_MAX_RXBUFFER
srrctl |= IXGBE_MAX_RXBUFFER >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
#else
@@ -2623,7 +2837,7 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
}
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(index), srrctl);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(reg_idx), srrctl);
}
static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
@@ -2693,19 +2907,36 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
}
/**
+ * ixgbe_clear_rscctl - disable RSC for the indicated ring
+ * @adapter: address of board private structure
+ * @ring: structure containing ring specific data
+ **/
+void ixgbe_clear_rscctl(struct ixgbe_adapter *adapter,
+ struct ixgbe_ring *ring)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 rscctrl;
+ u8 reg_idx = ring->reg_idx;
+
+ rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(reg_idx));
+ rscctrl &= ~IXGBE_RSCCTL_RSCEN;
+ IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl);
+}
+
+/**
* ixgbe_configure_rscctl - enable RSC for the indicated ring
* @adapter: address of board private structure
* @index: index of ring to set
**/
-static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
+void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
struct ixgbe_ring *ring)
{
struct ixgbe_hw *hw = &adapter->hw;
u32 rscctrl;
int rx_buf_len;
- u16 reg_idx = ring->reg_idx;
+ u8 reg_idx = ring->reg_idx;
- if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))
+ if (!ring_is_rsc_enabled(ring))
return;
rx_buf_len = ring->rx_buf_len;
@@ -2716,7 +2947,7 @@ static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
* total size of max desc * buf_len is not greater
* than 65535
*/
- if (ring->flags & IXGBE_RING_RX_PS_ENABLED) {
+ if (ring_is_ps_enabled(ring)) {
#if (MAX_SKB_FRAGS > 16)
rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
#elif (MAX_SKB_FRAGS > 8)
@@ -2769,9 +3000,9 @@ static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
struct ixgbe_ring *ring)
{
struct ixgbe_hw *hw = &adapter->hw;
- int reg_idx = ring->reg_idx;
int wait_loop = IXGBE_MAX_RX_DESC_POLL;
u32 rxdctl;
+ u8 reg_idx = ring->reg_idx;
/* RXDCTL.EN will return 0 on 82598 if link is down, so skip it */
if (hw->mac.type == ixgbe_mac_82598EB &&
@@ -2795,7 +3026,7 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
struct ixgbe_hw *hw = &adapter->hw;
u64 rdba = ring->dma;
u32 rxdctl;
- u16 reg_idx = ring->reg_idx;
+ u8 reg_idx = ring->reg_idx;
/* disable queue to avoid issues while updating state */
rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
@@ -2809,8 +3040,7 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
ring->count * sizeof(union ixgbe_adv_rx_desc));
IXGBE_WRITE_REG(hw, IXGBE_RDH(reg_idx), 0);
IXGBE_WRITE_REG(hw, IXGBE_RDT(reg_idx), 0);
- ring->head = IXGBE_RDH(reg_idx);
- ring->tail = IXGBE_RDT(reg_idx);
+ ring->tail = hw->hw_addr + IXGBE_RDT(reg_idx);
ixgbe_configure_srrctl(adapter, ring);
ixgbe_configure_rscctl(adapter, ring);
@@ -2832,7 +3062,7 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
ixgbe_rx_desc_queue_enable(adapter, ring);
- ixgbe_alloc_rx_buffers(adapter, ring, IXGBE_DESC_UNUSED(ring));
+ ixgbe_alloc_rx_buffers(ring, IXGBE_DESC_UNUSED(ring));
}
static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
@@ -2955,24 +3185,32 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
rx_ring->rx_buf_len = rx_buf_len;
if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)
- rx_ring->flags |= IXGBE_RING_RX_PS_ENABLED;
+ set_ring_ps_enabled(rx_ring);
+ else
+ clear_ring_ps_enabled(rx_ring);
+
+ if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
+ set_ring_rsc_enabled(rx_ring);
else
- rx_ring->flags &= ~IXGBE_RING_RX_PS_ENABLED;
+ clear_ring_rsc_enabled(rx_ring);
#ifdef IXGBE_FCOE
if (netdev->features & NETIF_F_FCOE_MTU) {
struct ixgbe_ring_feature *f;
f = &adapter->ring_feature[RING_F_FCOE];
if ((i >= f->mask) && (i < f->mask + f->indices)) {
- rx_ring->flags &= ~IXGBE_RING_RX_PS_ENABLED;
+ clear_ring_ps_enabled(rx_ring);
if (rx_buf_len < IXGBE_FCOE_JUMBO_FRAME_SIZE)
rx_ring->rx_buf_len =
IXGBE_FCOE_JUMBO_FRAME_SIZE;
+ } else if (!ring_is_rsc_enabled(rx_ring) &&
+ !ring_is_ps_enabled(rx_ring)) {
+ rx_ring->rx_buf_len =
+ IXGBE_FCOE_JUMBO_FRAME_SIZE;
}
}
#endif /* IXGBE_FCOE */
}
-
}
static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter)
@@ -2995,6 +3233,7 @@ static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter)
rdrxctl |= IXGBE_RDRXCTL_MVMEN;
break;
case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
/* Disable RSC for ACK packets */
IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
(IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));
@@ -3122,6 +3361,7 @@ static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter)
IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
break;
case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
for (i = 0; i < adapter->num_rx_queues; i++) {
j = adapter->rx_ring[i]->reg_idx;
vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
@@ -3151,6 +3391,7 @@ static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter)
IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
break;
case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
for (i = 0; i < adapter->num_rx_queues; i++) {
j = adapter->rx_ring[i]->reg_idx;
vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
@@ -3348,8 +3589,6 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
- u32 txdctl;
- int i, j;
if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) {
if (hw->mac.type == ixgbe_mac_82598EB)
@@ -3365,25 +3604,18 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
#endif
- ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, max_frame,
+ ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
DCB_TX_CONFIG);
- ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, max_frame,
+ ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
DCB_RX_CONFIG);
- /* reconfigure the hardware */
- ixgbe_dcb_hw_config(&adapter->hw, &adapter->dcb_cfg);
-
- for (i = 0; i < adapter->num_tx_queues; i++) {
- j = adapter->tx_ring[i]->reg_idx;
- txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
- /* PThresh workaround for Tx hang with DFP enabled. */
- txdctl |= 32;
- IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
- }
/* Enable VLAN tag insert/strip */
adapter->netdev->features |= NETIF_F_HW_VLAN_RX;
hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true);
+
+ /* reconfigure the hardware */
+ ixgbe_dcb_hw_config(hw, &adapter->dcb_cfg);
}
#endif
@@ -3515,8 +3747,9 @@ static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter)
case ixgbe_mac_82598EB:
IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
break;
- default:
case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ default:
IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
break;
@@ -3561,12 +3794,20 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
ixgbe_configure_msi_and_legacy(adapter);
/* enable the optics */
- if (hw->phy.multispeed_fiber)
+ if (hw->phy.multispeed_fiber && hw->mac.ops.enable_tx_laser)
hw->mac.ops.enable_tx_laser(hw);
clear_bit(__IXGBE_DOWN, &adapter->state);
ixgbe_napi_enable_all(adapter);
+ if (ixgbe_is_sfp(hw)) {
+ ixgbe_sfp_link_config(adapter);
+ } else {
+ err = ixgbe_non_sfp_link_config(hw);
+ if (err)
+ e_err(probe, "link_config FAILED %d\n", err);
+ }
+
/* clear any pending interrupts, may auto mask */
IXGBE_READ_REG(hw, IXGBE_EICR);
ixgbe_irq_enable(adapter, true, true);
@@ -3589,26 +3830,8 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
* If we're not hot-pluggable SFP+, we just need to configure link
* and bring it up.
*/
- if (hw->phy.type == ixgbe_phy_unknown) {
- err = hw->phy.ops.identify(hw);
- if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
- /*
- * Take the device down and schedule the sfp tasklet
- * which will unregister_netdev and log it.
- */
- ixgbe_down(adapter);
- schedule_work(&adapter->sfp_config_module_task);
- return err;
- }
- }
-
- if (ixgbe_is_sfp(hw)) {
- ixgbe_sfp_link_config(adapter);
- } else {
- err = ixgbe_non_sfp_link_config(hw);
- if (err)
- e_err(probe, "link_config FAILED %d\n", err);
- }
+ if (hw->phy.type == ixgbe_phy_unknown)
+ schedule_work(&adapter->sfp_config_module_task);
/* enable transmits */
netif_tx_start_all_queues(adapter->netdev);
@@ -3686,15 +3909,13 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
/**
* ixgbe_clean_rx_ring - Free Rx Buffers per Queue
- * @adapter: board private structure
* @rx_ring: ring to free buffers from
**/
-static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *rx_ring)
+static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
{
- struct pci_dev *pdev = adapter->pdev;
+ struct device *dev = rx_ring->dev;
unsigned long size;
- unsigned int i;
+ u16 i;
/* ring already cleared, nothing to do */
if (!rx_ring->rx_buffer_info)
@@ -3706,7 +3927,7 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
rx_buffer_info = &rx_ring->rx_buffer_info[i];
if (rx_buffer_info->dma) {
- dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
+ dma_unmap_single(rx_ring->dev, rx_buffer_info->dma,
rx_ring->rx_buf_len,
DMA_FROM_DEVICE);
rx_buffer_info->dma = 0;
@@ -3717,7 +3938,7 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
do {
struct sk_buff *this = skb;
if (IXGBE_RSC_CB(this)->delay_unmap) {
- dma_unmap_single(&pdev->dev,
+ dma_unmap_single(dev,
IXGBE_RSC_CB(this)->dma,
rx_ring->rx_buf_len,
DMA_FROM_DEVICE);
@@ -3731,7 +3952,7 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
if (!rx_buffer_info->page)
continue;
if (rx_buffer_info->page_dma) {
- dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma,
+ dma_unmap_page(dev, rx_buffer_info->page_dma,
PAGE_SIZE / 2, DMA_FROM_DEVICE);
rx_buffer_info->page_dma = 0;
}
@@ -3748,24 +3969,17 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
-
- if (rx_ring->head)
- writel(0, adapter->hw.hw_addr + rx_ring->head);
- if (rx_ring->tail)
- writel(0, adapter->hw.hw_addr + rx_ring->tail);
}
/**
* ixgbe_clean_tx_ring - Free Tx Buffers
- * @adapter: board private structure
* @tx_ring: ring to be cleaned
**/
-static void ixgbe_clean_tx_ring(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *tx_ring)
+static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring)
{
struct ixgbe_tx_buffer *tx_buffer_info;
unsigned long size;
- unsigned int i;
+ u16 i;
/* ring already cleared, nothing to do */
if (!tx_ring->tx_buffer_info)
@@ -3774,7 +3988,7 @@ static void ixgbe_clean_tx_ring(struct ixgbe_adapter *adapter,
/* Free all the Tx ring sk_buffs */
for (i = 0; i < tx_ring->count; i++) {
tx_buffer_info = &tx_ring->tx_buffer_info[i];
- ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info);
+ ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
}
size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
@@ -3785,11 +3999,6 @@ static void ixgbe_clean_tx_ring(struct ixgbe_adapter *adapter,
tx_ring->next_to_use = 0;
tx_ring->next_to_clean = 0;
-
- if (tx_ring->head)
- writel(0, adapter->hw.hw_addr + tx_ring->head);
- if (tx_ring->tail)
- writel(0, adapter->hw.hw_addr + tx_ring->tail);
}
/**
@@ -3801,7 +4010,7 @@ static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter)
int i;
for (i = 0; i < adapter->num_rx_queues; i++)
- ixgbe_clean_rx_ring(adapter, adapter->rx_ring[i]);
+ ixgbe_clean_rx_ring(adapter->rx_ring[i]);
}
/**
@@ -3813,7 +4022,7 @@ static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter)
int i;
for (i = 0; i < adapter->num_tx_queues; i++)
- ixgbe_clean_tx_ring(adapter, adapter->tx_ring[i]);
+ ixgbe_clean_tx_ring(adapter->tx_ring[i]);
}
void ixgbe_down(struct ixgbe_adapter *adapter)
@@ -3822,7 +4031,7 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
struct ixgbe_hw *hw = &adapter->hw;
u32 rxctrl;
u32 txdctl;
- int i, j;
+ int i;
int num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
/* signal that we are down to the interrupt handler */
@@ -3880,19 +4089,25 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
/* disable transmits in the hardware now that interrupts are off */
for (i = 0; i < adapter->num_tx_queues; i++) {
- j = adapter->tx_ring[i]->reg_idx;
- txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
- IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j),
+ u8 reg_idx = adapter->tx_ring[i]->reg_idx;
+ txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
+ IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx),
(txdctl & ~IXGBE_TXDCTL_ENABLE));
}
/* Disable the Tx DMA engine on 82599 */
- if (hw->mac.type == ixgbe_mac_82599EB)
+ switch (hw->mac.type) {
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL,
(IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
~IXGBE_DMATXCTL_TE));
+ break;
+ default:
+ break;
+ }
/* power down the optics */
- if (hw->phy.multispeed_fiber)
+ if (hw->phy.multispeed_fiber && hw->mac.ops.disable_tx_laser)
hw->mac.ops.disable_tx_laser(hw);
/* clear n-tuple filters that are cached */
@@ -3924,10 +4139,8 @@ static int ixgbe_poll(struct napi_struct *napi, int budget)
int tx_clean_complete, work_done = 0;
#ifdef CONFIG_IXGBE_DCA
- if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
- ixgbe_update_tx_dca(adapter, adapter->tx_ring[0]);
- ixgbe_update_rx_dca(adapter, adapter->rx_ring[0]);
- }
+ if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
+ ixgbe_update_dca(q_vector);
#endif
tx_clean_complete = ixgbe_clean_tx_irq(q_vector, adapter->tx_ring[0]);
@@ -3955,6 +4168,8 @@ static void ixgbe_tx_timeout(struct net_device *netdev)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ adapter->tx_timeout_count++;
+
/* Do the reset outside of interrupt context */
schedule_work(&adapter->reset_task);
}
@@ -3969,8 +4184,6 @@ static void ixgbe_reset_task(struct work_struct *work)
test_bit(__IXGBE_RESETTING, &adapter->state))
return;
- adapter->tx_timeout_count++;
-
ixgbe_dump(adapter);
netdev_err(adapter->netdev, "Reset adapter\n");
ixgbe_reinit_locked(adapter);
@@ -4220,19 +4433,16 @@ static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
static inline bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
{
int i;
- bool ret = false;
- if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
- for (i = 0; i < adapter->num_rx_queues; i++)
- adapter->rx_ring[i]->reg_idx = i;
- for (i = 0; i < adapter->num_tx_queues; i++)
- adapter->tx_ring[i]->reg_idx = i;
- ret = true;
- } else {
- ret = false;
- }
+ if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED))
+ return false;
- return ret;
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ adapter->rx_ring[i]->reg_idx = i;
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ adapter->tx_ring[i]->reg_idx = i;
+
+ return true;
}
#ifdef CONFIG_IXGBE_DCB
@@ -4249,71 +4459,67 @@ static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
bool ret = false;
int dcb_i = adapter->ring_feature[RING_F_DCB].indices;
- if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
- if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
- /* the number of queues is assumed to be symmetric */
- for (i = 0; i < dcb_i; i++) {
- adapter->rx_ring[i]->reg_idx = i << 3;
- adapter->tx_ring[i]->reg_idx = i << 2;
- }
- ret = true;
- } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
- if (dcb_i == 8) {
- /*
- * Tx TC0 starts at: descriptor queue 0
- * Tx TC1 starts at: descriptor queue 32
- * Tx TC2 starts at: descriptor queue 64
- * Tx TC3 starts at: descriptor queue 80
- * Tx TC4 starts at: descriptor queue 96
- * Tx TC5 starts at: descriptor queue 104
- * Tx TC6 starts at: descriptor queue 112
- * Tx TC7 starts at: descriptor queue 120
- *
- * Rx TC0-TC7 are offset by 16 queues each
- */
- for (i = 0; i < 3; i++) {
- adapter->tx_ring[i]->reg_idx = i << 5;
- adapter->rx_ring[i]->reg_idx = i << 4;
- }
- for ( ; i < 5; i++) {
- adapter->tx_ring[i]->reg_idx =
- ((i + 2) << 4);
- adapter->rx_ring[i]->reg_idx = i << 4;
- }
- for ( ; i < dcb_i; i++) {
- adapter->tx_ring[i]->reg_idx =
- ((i + 8) << 3);
- adapter->rx_ring[i]->reg_idx = i << 4;
- }
+ if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
+ return false;
- ret = true;
- } else if (dcb_i == 4) {
- /*
- * Tx TC0 starts at: descriptor queue 0
- * Tx TC1 starts at: descriptor queue 64
- * Tx TC2 starts at: descriptor queue 96
- * Tx TC3 starts at: descriptor queue 112
- *
- * Rx TC0-TC3 are offset by 32 queues each
- */
- adapter->tx_ring[0]->reg_idx = 0;
- adapter->tx_ring[1]->reg_idx = 64;
- adapter->tx_ring[2]->reg_idx = 96;
- adapter->tx_ring[3]->reg_idx = 112;
- for (i = 0 ; i < dcb_i; i++)
- adapter->rx_ring[i]->reg_idx = i << 5;
-
- ret = true;
- } else {
- ret = false;
+ /* the number of queues is assumed to be symmetric */
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82598EB:
+ for (i = 0; i < dcb_i; i++) {
+ adapter->rx_ring[i]->reg_idx = i << 3;
+ adapter->tx_ring[i]->reg_idx = i << 2;
+ }
+ ret = true;
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ if (dcb_i == 8) {
+ /*
+ * Tx TC0 starts at: descriptor queue 0
+ * Tx TC1 starts at: descriptor queue 32
+ * Tx TC2 starts at: descriptor queue 64
+ * Tx TC3 starts at: descriptor queue 80
+ * Tx TC4 starts at: descriptor queue 96
+ * Tx TC5 starts at: descriptor queue 104
+ * Tx TC6 starts at: descriptor queue 112
+ * Tx TC7 starts at: descriptor queue 120
+ *
+ * Rx TC0-TC7 are offset by 16 queues each
+ */
+ for (i = 0; i < 3; i++) {
+ adapter->tx_ring[i]->reg_idx = i << 5;
+ adapter->rx_ring[i]->reg_idx = i << 4;
}
- } else {
- ret = false;
+ for ( ; i < 5; i++) {
+ adapter->tx_ring[i]->reg_idx = ((i + 2) << 4);
+ adapter->rx_ring[i]->reg_idx = i << 4;
+ }
+ for ( ; i < dcb_i; i++) {
+ adapter->tx_ring[i]->reg_idx = ((i + 8) << 3);
+ adapter->rx_ring[i]->reg_idx = i << 4;
+ }
+ ret = true;
+ } else if (dcb_i == 4) {
+ /*
+ * Tx TC0 starts at: descriptor queue 0
+ * Tx TC1 starts at: descriptor queue 64
+ * Tx TC2 starts at: descriptor queue 96
+ * Tx TC3 starts at: descriptor queue 112
+ *
+ * Rx TC0-TC3 are offset by 32 queues each
+ */
+ adapter->tx_ring[0]->reg_idx = 0;
+ adapter->tx_ring[1]->reg_idx = 64;
+ adapter->tx_ring[2]->reg_idx = 96;
+ adapter->tx_ring[3]->reg_idx = 112;
+ for (i = 0 ; i < dcb_i; i++)
+ adapter->rx_ring[i]->reg_idx = i << 5;
+ ret = true;
}
- } else {
- ret = false;
+ break;
+ default:
+ break;
}
-
return ret;
}
#endif
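As a quick check, the shift expressions in the 8-TC branch above reproduce the queue offsets listed in the comment; expanding them for i = 0..7 gives:

	/* Tx reg_idx (8 TCs):
	 *   i = 0..2:  i << 5       -> 0, 32, 64
	 *   i = 3..4:  (i + 2) << 4 -> 80, 96
	 *   i = 5..7:  (i + 8) << 3 -> 104, 112, 120
	 * Rx reg_idx:  i << 4       -> 0, 16, 32, ..., 112
	 */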
@@ -4353,55 +4559,55 @@ static inline bool ixgbe_cache_ring_fdir(struct ixgbe_adapter *adapter)
*/
static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter)
{
- int i, fcoe_rx_i = 0, fcoe_tx_i = 0;
- bool ret = false;
struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
+ int i;
+ u8 fcoe_rx_i = 0, fcoe_tx_i = 0;
+
+ if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
+ return false;
- if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
#ifdef CONFIG_IXGBE_DCB
- if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
- struct ixgbe_fcoe *fcoe = &adapter->fcoe;
+ if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
+ struct ixgbe_fcoe *fcoe = &adapter->fcoe;
- ixgbe_cache_ring_dcb(adapter);
- /* find out queues in TC for FCoE */
- fcoe_rx_i = adapter->rx_ring[fcoe->tc]->reg_idx + 1;
- fcoe_tx_i = adapter->tx_ring[fcoe->tc]->reg_idx + 1;
- /*
- * In 82599, the number of Tx queues for each traffic
- * class for both 8-TC and 4-TC modes are:
- * TCs : TC0 TC1 TC2 TC3 TC4 TC5 TC6 TC7
- * 8 TCs: 32 32 16 16 8 8 8 8
- * 4 TCs: 64 64 32 32
- * We have max 8 queues for FCoE, where 8 the is
- * FCoE redirection table size. If TC for FCoE is
- * less than or equal to TC3, we have enough queues
- * to add max of 8 queues for FCoE, so we start FCoE
- * tx descriptor from the next one, i.e., reg_idx + 1.
- * If TC for FCoE is above TC3, implying 8 TC mode,
- * and we need 8 for FCoE, we have to take all queues
- * in that traffic class for FCoE.
- */
- if ((f->indices == IXGBE_FCRETA_SIZE) && (fcoe->tc > 3))
- fcoe_tx_i--;
- }
+ ixgbe_cache_ring_dcb(adapter);
+ /* find out queues in TC for FCoE */
+ fcoe_rx_i = adapter->rx_ring[fcoe->tc]->reg_idx + 1;
+ fcoe_tx_i = adapter->tx_ring[fcoe->tc]->reg_idx + 1;
+ /*
+ * In 82599, the number of Tx queues for each traffic
+ * class for both 8-TC and 4-TC modes are:
+ * TCs : TC0 TC1 TC2 TC3 TC4 TC5 TC6 TC7
+ * 8 TCs: 32 32 16 16 8 8 8 8
+ * 4 TCs: 64 64 32 32
+ * We have max 8 queues for FCoE, where 8 is the
+ * FCoE redirection table size. If the TC for FCoE is
+ * less than or equal to TC3, we have enough queues
+ * to add a max of 8 queues for FCoE, so we start the
+ * FCoE Tx queues at the next one, i.e., reg_idx + 1.
+ * If TC for FCoE is above TC3, implying 8 TC mode,
+ * and we need 8 for FCoE, we have to take all queues
+ * in that traffic class for FCoE.
+ */
+ if ((f->indices == IXGBE_FCRETA_SIZE) && (fcoe->tc > 3))
+ fcoe_tx_i--;
+ }
#endif /* CONFIG_IXGBE_DCB */
- if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
- if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
- (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
- ixgbe_cache_ring_fdir(adapter);
- else
- ixgbe_cache_ring_rss(adapter);
+ if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
+ if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
+ (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
+ ixgbe_cache_ring_fdir(adapter);
+ else
+ ixgbe_cache_ring_rss(adapter);
- fcoe_rx_i = f->mask;
- fcoe_tx_i = f->mask;
- }
- for (i = 0; i < f->indices; i++, fcoe_rx_i++, fcoe_tx_i++) {
- adapter->rx_ring[f->mask + i]->reg_idx = fcoe_rx_i;
- adapter->tx_ring[f->mask + i]->reg_idx = fcoe_tx_i;
- }
- ret = true;
+ fcoe_rx_i = f->mask;
+ fcoe_tx_i = f->mask;
}
- return ret;
+ for (i = 0; i < f->indices; i++, fcoe_rx_i++, fcoe_tx_i++) {
+ adapter->rx_ring[f->mask + i]->reg_idx = fcoe_rx_i;
+ adapter->tx_ring[f->mask + i]->reg_idx = fcoe_tx_i;
+ }
+ return true;
}
#endif /* IXGBE_FCOE */
@@ -4470,65 +4676,55 @@ static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
**/
static int ixgbe_alloc_queues(struct ixgbe_adapter *adapter)
{
- int i;
- int orig_node = adapter->node;
+ int rx = 0, tx = 0, nid = adapter->node;
- for (i = 0; i < adapter->num_tx_queues; i++) {
- struct ixgbe_ring *ring = adapter->tx_ring[i];
- if (orig_node == -1) {
- int cur_node = next_online_node(adapter->node);
- if (cur_node == MAX_NUMNODES)
- cur_node = first_online_node;
- adapter->node = cur_node;
- }
- ring = kzalloc_node(sizeof(struct ixgbe_ring), GFP_KERNEL,
- adapter->node);
+ if (nid < 0 || !node_online(nid))
+ nid = first_online_node;
+
+ for (; tx < adapter->num_tx_queues; tx++) {
+ struct ixgbe_ring *ring;
+
+ ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, nid);
if (!ring)
- ring = kzalloc(sizeof(struct ixgbe_ring), GFP_KERNEL);
+ ring = kzalloc(sizeof(*ring), GFP_KERNEL);
if (!ring)
- goto err_tx_ring_allocation;
+ goto err_allocation;
ring->count = adapter->tx_ring_count;
- ring->queue_index = i;
- ring->numa_node = adapter->node;
+ ring->queue_index = tx;
+ ring->numa_node = nid;
+ ring->dev = &adapter->pdev->dev;
+ ring->netdev = adapter->netdev;
- adapter->tx_ring[i] = ring;
+ adapter->tx_ring[tx] = ring;
}
- /* Restore the adapter's original node */
- adapter->node = orig_node;
+ for (; rx < adapter->num_rx_queues; rx++) {
+ struct ixgbe_ring *ring;
- for (i = 0; i < adapter->num_rx_queues; i++) {
- struct ixgbe_ring *ring = adapter->rx_ring[i];
- if (orig_node == -1) {
- int cur_node = next_online_node(adapter->node);
- if (cur_node == MAX_NUMNODES)
- cur_node = first_online_node;
- adapter->node = cur_node;
- }
- ring = kzalloc_node(sizeof(struct ixgbe_ring), GFP_KERNEL,
- adapter->node);
+ ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, nid);
if (!ring)
- ring = kzalloc(sizeof(struct ixgbe_ring), GFP_KERNEL);
+ ring = kzalloc(sizeof(*ring), GFP_KERNEL);
if (!ring)
- goto err_rx_ring_allocation;
+ goto err_allocation;
ring->count = adapter->rx_ring_count;
- ring->queue_index = i;
- ring->numa_node = adapter->node;
+ ring->queue_index = rx;
+ ring->numa_node = nid;
+ ring->dev = &adapter->pdev->dev;
+ ring->netdev = adapter->netdev;
- adapter->rx_ring[i] = ring;
+ adapter->rx_ring[rx] = ring;
}
- /* Restore the adapter's original node */
- adapter->node = orig_node;
-
ixgbe_cache_ring_register(adapter);
return 0;
-err_rx_ring_allocation:
- for (i = 0; i < adapter->num_tx_queues; i++)
- kfree(adapter->tx_ring[i]);
-err_tx_ring_allocation:
+err_allocation:
+ while (tx)
+ kfree(adapter->tx_ring[--tx]);
+
+ while (rx)
+ kfree(adapter->rx_ring[--rx]);
return -ENOMEM;
}
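The rewrite above also simplifies the NUMA handling: instead of rotating adapter->node on every iteration, it validates the node once and lets kzalloc_node() fall back to a plain kzalloc(). A minimal sketch of that allocation pattern on its own (alloc_ring_on_node() is a hypothetical helper, not in the driver):

	#include <linux/slab.h>
	#include <linux/nodemask.h>

	static struct ixgbe_ring *alloc_ring_on_node(int nid)
	{
		struct ixgbe_ring *ring;

		/* fall back to any online node if the hint is invalid */
		if (nid < 0 || !node_online(nid))
			nid = first_online_node;

		ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, nid);
		if (!ring)
			ring = kzalloc(sizeof(*ring), GFP_KERNEL);

		return ring;	/* NULL only if both attempts fail */
	}

The unwind at err_allocation relies on tx and rx doubling as loop counters: whatever was allocated before the failure is freed in reverse order, so no partially initialized ring is left behind.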
@@ -4750,6 +4946,11 @@ err_set_interrupt:
return err;
}
+static void ring_free_rcu(struct rcu_head *head)
+{
+ kfree(container_of(head, struct ixgbe_ring, rcu));
+}
+
/**
* ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings
* @adapter: board private structure to clear interrupt scheme on
@@ -4766,7 +4967,12 @@ void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter)
adapter->tx_ring[i] = NULL;
}
for (i = 0; i < adapter->num_rx_queues; i++) {
- kfree(adapter->rx_ring[i]);
+ struct ixgbe_ring *ring = adapter->rx_ring[i];
+
+ /* ixgbe_get_stats64() might access this ring, we must wait
+ * a grace period before freeing it.
+ */
+ call_rcu(&ring->rcu, ring_free_rcu);
adapter->rx_ring[i] = NULL;
}
@@ -4843,6 +5049,7 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
int j;
struct tc_configuration *tc;
#endif
+ int max_frame = dev->mtu + ETH_HLEN + ETH_FCS_LEN;
/* PCI config space info */
@@ -4857,11 +5064,14 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
adapter->ring_feature[RING_F_RSS].indices = rss;
adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
adapter->ring_feature[RING_F_DCB].indices = IXGBE_MAX_DCB_INDICES;
- if (hw->mac.type == ixgbe_mac_82598EB) {
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
if (hw->device_id == IXGBE_DEV_ID_82598AT)
adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82598;
- } else if (hw->mac.type == ixgbe_mac_82599EB) {
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599;
adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
@@ -4890,6 +5100,9 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
adapter->fcoe.up = IXGBE_FCOE_DEFTC;
#endif
#endif /* IXGBE_FCOE */
+ break;
+ default:
+ break;
}
#ifdef CONFIG_IXGBE_DCB
@@ -4919,8 +5132,8 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
#ifdef CONFIG_DCB
adapter->last_lfc_mode = hw->fc.current_mode;
#endif
- hw->fc.high_water = IXGBE_DEFAULT_FCRTH;
- hw->fc.low_water = IXGBE_DEFAULT_FCRTL;
+ hw->fc.high_water = FC_HIGH_WATER(max_frame);
+ hw->fc.low_water = FC_LOW_WATER(max_frame);
hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
hw->fc.send_xon = true;
hw->fc.disable_fc_autoneg = false;
@@ -4958,15 +5171,13 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
/**
* ixgbe_setup_tx_resources - allocate Tx resources (Descriptors)
- * @adapter: board private structure
* @tx_ring: tx descriptor ring (for a specific queue) to setup
*
* Return 0 on success, negative on failure
**/
-int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *tx_ring)
+int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring)
{
- struct pci_dev *pdev = adapter->pdev;
+ struct device *dev = tx_ring->dev;
int size;
size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
@@ -4981,7 +5192,7 @@ int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter,
tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
tx_ring->size = ALIGN(tx_ring->size, 4096);
- tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
+ tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
&tx_ring->dma, GFP_KERNEL);
if (!tx_ring->desc)
goto err;
@@ -4994,7 +5205,7 @@ int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter,
err:
vfree(tx_ring->tx_buffer_info);
tx_ring->tx_buffer_info = NULL;
- e_err(probe, "Unable to allocate memory for the Tx descriptor ring\n");
+ dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n");
return -ENOMEM;
}
@@ -5013,7 +5224,7 @@ static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
int i, err = 0;
for (i = 0; i < adapter->num_tx_queues; i++) {
- err = ixgbe_setup_tx_resources(adapter, adapter->tx_ring[i]);
+ err = ixgbe_setup_tx_resources(adapter->tx_ring[i]);
if (!err)
continue;
e_err(probe, "Allocation for Tx Queue %u failed\n", i);
@@ -5025,48 +5236,41 @@ static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
/**
* ixgbe_setup_rx_resources - allocate Rx resources (Descriptors)
- * @adapter: board private structure
* @rx_ring: rx descriptor ring (for a specific queue) to setup
*
* Returns 0 on success, negative on failure
**/
-int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *rx_ring)
+int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring)
{
- struct pci_dev *pdev = adapter->pdev;
+ struct device *dev = rx_ring->dev;
int size;
size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
- rx_ring->rx_buffer_info = vmalloc_node(size, adapter->node);
+ rx_ring->rx_buffer_info = vmalloc_node(size, rx_ring->numa_node);
if (!rx_ring->rx_buffer_info)
rx_ring->rx_buffer_info = vmalloc(size);
- if (!rx_ring->rx_buffer_info) {
- e_err(probe, "vmalloc allocation failed for the Rx "
- "descriptor ring\n");
- goto alloc_failed;
- }
+ if (!rx_ring->rx_buffer_info)
+ goto err;
memset(rx_ring->rx_buffer_info, 0, size);
/* Round up to nearest 4K */
rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
rx_ring->size = ALIGN(rx_ring->size, 4096);
- rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
+ rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
&rx_ring->dma, GFP_KERNEL);
- if (!rx_ring->desc) {
- e_err(probe, "Memory allocation failed for the Rx "
- "descriptor ring\n");
- vfree(rx_ring->rx_buffer_info);
- goto alloc_failed;
- }
+ if (!rx_ring->desc)
+ goto err;
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
return 0;
-
-alloc_failed:
+err:
+ vfree(rx_ring->rx_buffer_info);
+ rx_ring->rx_buffer_info = NULL;
+ dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n");
return -ENOMEM;
}
@@ -5080,13 +5284,12 @@ alloc_failed:
*
* Return 0 on success, negative on failure
**/
-
static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
{
int i, err = 0;
for (i = 0; i < adapter->num_rx_queues; i++) {
- err = ixgbe_setup_rx_resources(adapter, adapter->rx_ring[i]);
+ err = ixgbe_setup_rx_resources(adapter->rx_ring[i]);
if (!err)
continue;
e_err(probe, "Allocation for Rx Queue %u failed\n", i);
@@ -5098,23 +5301,23 @@ static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
/**
* ixgbe_free_tx_resources - Free Tx Resources per Queue
- * @adapter: board private structure
* @tx_ring: Tx descriptor ring for a specific queue
*
* Free all transmit software resources
**/
-void ixgbe_free_tx_resources(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *tx_ring)
+void ixgbe_free_tx_resources(struct ixgbe_ring *tx_ring)
{
- struct pci_dev *pdev = adapter->pdev;
-
- ixgbe_clean_tx_ring(adapter, tx_ring);
+ ixgbe_clean_tx_ring(tx_ring);
vfree(tx_ring->tx_buffer_info);
tx_ring->tx_buffer_info = NULL;
- dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
- tx_ring->dma);
+ /* if not set, then don't free */
+ if (!tx_ring->desc)
+ return;
+
+ dma_free_coherent(tx_ring->dev, tx_ring->size,
+ tx_ring->desc, tx_ring->dma);
tx_ring->desc = NULL;
}
@@ -5131,28 +5334,28 @@ static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter)
for (i = 0; i < adapter->num_tx_queues; i++)
if (adapter->tx_ring[i]->desc)
- ixgbe_free_tx_resources(adapter, adapter->tx_ring[i]);
+ ixgbe_free_tx_resources(adapter->tx_ring[i]);
}
/**
* ixgbe_free_rx_resources - Free Rx Resources
- * @adapter: board private structure
* @rx_ring: ring to clean the resources from
*
* Free all receive software resources
**/
-void ixgbe_free_rx_resources(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *rx_ring)
+void ixgbe_free_rx_resources(struct ixgbe_ring *rx_ring)
{
- struct pci_dev *pdev = adapter->pdev;
-
- ixgbe_clean_rx_ring(adapter, rx_ring);
+ ixgbe_clean_rx_ring(rx_ring);
vfree(rx_ring->rx_buffer_info);
rx_ring->rx_buffer_info = NULL;
- dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
- rx_ring->dma);
+ /* if not set, then don't free */
+ if (!rx_ring->desc)
+ return;
+
+ dma_free_coherent(rx_ring->dev, rx_ring->size,
+ rx_ring->desc, rx_ring->dma);
rx_ring->desc = NULL;
}
@@ -5169,7 +5372,7 @@ static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
for (i = 0; i < adapter->num_rx_queues; i++)
if (adapter->rx_ring[i]->desc)
- ixgbe_free_rx_resources(adapter, adapter->rx_ring[i]);
+ ixgbe_free_rx_resources(adapter->rx_ring[i]);
}
/**
@@ -5182,6 +5385,7 @@ static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
/* MTU < 68 is an error and causes problems on some kernels */
@@ -5192,6 +5396,9 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
/* must set new MTU before calling down or up */
netdev->mtu = new_mtu;
+ hw->fc.high_water = FC_HIGH_WATER(max_frame);
+ hw->fc.low_water = FC_LOW_WATER(max_frame);
+
if (netif_running(netdev))
ixgbe_reinit_locked(adapter);
@@ -5287,8 +5494,8 @@ static int ixgbe_close(struct net_device *netdev)
#ifdef CONFIG_PM
static int ixgbe_resume(struct pci_dev *pdev)
{
- struct net_device *netdev = pci_get_drvdata(pdev);
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
+ struct net_device *netdev = adapter->netdev;
u32 err;
pci_set_power_state(pdev, PCI_D0);
@@ -5319,7 +5526,7 @@ static int ixgbe_resume(struct pci_dev *pdev)
IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
if (netif_running(netdev)) {
- err = ixgbe_open(adapter->netdev);
+ err = ixgbe_open(netdev);
if (err)
return err;
}
@@ -5332,8 +5539,8 @@ static int ixgbe_resume(struct pci_dev *pdev)
static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
{
- struct net_device *netdev = pci_get_drvdata(pdev);
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
+ struct net_device *netdev = adapter->netdev;
struct ixgbe_hw *hw = &adapter->hw;
u32 ctrl, fctrl;
u32 wufc = adapter->wol;
@@ -5350,6 +5557,8 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
ixgbe_free_all_rx_resources(adapter);
}
+ ixgbe_clear_interrupt_scheme(adapter);
+
#ifdef CONFIG_PM
retval = pci_save_state(pdev);
if (retval)
@@ -5376,15 +5585,20 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
}
- if (wufc && hw->mac.type == ixgbe_mac_82599EB)
- pci_wake_from_d3(pdev, true);
- else
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
pci_wake_from_d3(pdev, false);
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ pci_wake_from_d3(pdev, !!wufc);
+ break;
+ default:
+ break;
+ }
*enable_wake = !!wufc;
- ixgbe_clear_interrupt_scheme(adapter);
-
ixgbe_release_hw_control(adapter);
pci_disable_device(pdev);
@@ -5433,10 +5647,12 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
struct ixgbe_hw *hw = &adapter->hw;
+ struct ixgbe_hw_stats *hwstats = &adapter->stats;
u64 total_mpc = 0;
u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot;
- u64 non_eop_descs = 0, restart_queue = 0;
- struct ixgbe_hw_stats *hwstats = &adapter->stats;
+ u64 non_eop_descs = 0, restart_queue = 0, tx_busy = 0;
+ u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0;
+ u64 bytes = 0, packets = 0;
if (test_bit(__IXGBE_DOWN, &adapter->state) ||
test_bit(__IXGBE_RESETTING, &adapter->state))
@@ -5449,21 +5665,41 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
adapter->hw_rx_no_dma_resources +=
IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
for (i = 0; i < adapter->num_rx_queues; i++) {
- rsc_count += adapter->rx_ring[i]->rsc_count;
- rsc_flush += adapter->rx_ring[i]->rsc_flush;
+ rsc_count += adapter->rx_ring[i]->rx_stats.rsc_count;
+ rsc_flush += adapter->rx_ring[i]->rx_stats.rsc_flush;
}
adapter->rsc_total_count = rsc_count;
adapter->rsc_total_flush = rsc_flush;
}
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ struct ixgbe_ring *rx_ring = adapter->rx_ring[i];
+ non_eop_descs += rx_ring->rx_stats.non_eop_descs;
+ alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed;
+ alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed;
+ bytes += rx_ring->stats.bytes;
+ packets += rx_ring->stats.packets;
+ }
+ adapter->non_eop_descs = non_eop_descs;
+ adapter->alloc_rx_page_failed = alloc_rx_page_failed;
+ adapter->alloc_rx_buff_failed = alloc_rx_buff_failed;
+ netdev->stats.rx_bytes = bytes;
+ netdev->stats.rx_packets = packets;
+
+ bytes = 0;
+ packets = 0;
/* gather some stats to the adapter struct that are per queue */
- for (i = 0; i < adapter->num_tx_queues; i++)
- restart_queue += adapter->tx_ring[i]->restart_queue;
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
+ restart_queue += tx_ring->tx_stats.restart_queue;
+ tx_busy += tx_ring->tx_stats.tx_busy;
+ bytes += tx_ring->stats.bytes;
+ packets += tx_ring->stats.packets;
+ }
adapter->restart_queue = restart_queue;
-
- for (i = 0; i < adapter->num_rx_queues; i++)
- non_eop_descs += adapter->rx_ring[i]->non_eop_descs;
- adapter->non_eop_descs = non_eop_descs;
+ adapter->tx_busy = tx_busy;
+ netdev->stats.tx_bytes = bytes;
+ netdev->stats.tx_packets = packets;
hwstats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
for (i = 0; i < 8; i++) {
@@ -5478,17 +5714,18 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i));
hwstats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i));
- if (hw->mac.type == ixgbe_mac_82599EB) {
- hwstats->pxonrxc[i] +=
- IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
- hwstats->pxoffrxc[i] +=
- IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
- hwstats->qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
- } else {
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
hwstats->pxonrxc[i] +=
IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
- hwstats->pxoffrxc[i] +=
- IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ hwstats->pxonrxc[i] +=
+ IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
+ break;
+ default:
+ break;
}
hwstats->pxontxc[i] += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
hwstats->pxofftxc[i] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
@@ -5497,21 +5734,25 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
/* work around hardware counting issue */
hwstats->gprc -= missed_rx;
+ ixgbe_update_xoff_received(adapter);
+
/* 82598 hardware only has a 32 bit counter in the high register */
- if (hw->mac.type == ixgbe_mac_82599EB) {
- u64 tmp;
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
+ hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
+ hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
+ hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
- tmp = IXGBE_READ_REG(hw, IXGBE_GORCH) & 0xF;
- /* 4 high bits of GORC */
- hwstats->gorc += (tmp << 32);
+ IXGBE_READ_REG(hw, IXGBE_GORCH); /* to clear */
hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
- tmp = IXGBE_READ_REG(hw, IXGBE_GOTCH) & 0xF;
- /* 4 high bits of GOTC */
- hwstats->gotc += (tmp << 32);
+ IXGBE_READ_REG(hw, IXGBE_GOTCH); /* to clear */
hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORL);
- IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */
+ IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */
hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
- hwstats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
hwstats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
hwstats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
#ifdef IXGBE_FCOE
@@ -5522,12 +5763,9 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
hwstats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
hwstats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
#endif /* IXGBE_FCOE */
- } else {
- hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
- hwstats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
- hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
- hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
- hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
+ break;
+ default:
+ break;
}
bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
hwstats->bprc += bprc;
@@ -5700,8 +5938,8 @@ static void ixgbe_fdir_reinit_task(struct work_struct *work)
if (ixgbe_reinit_fdir_tables_82599(hw) == 0) {
for (i = 0; i < adapter->num_tx_queues; i++)
- set_bit(__IXGBE_FDIR_INIT_DONE,
- &(adapter->tx_ring[i]->reinit_state));
+ set_bit(__IXGBE_TX_FDIR_INIT_DONE,
+ &(adapter->tx_ring[i]->state));
} else {
e_err(probe, "failed to finish FDIR re-initialization, "
"ignored adding FDIR ATR filters\n");
@@ -5763,17 +6001,27 @@ static void ixgbe_watchdog_task(struct work_struct *work)
if (!netif_carrier_ok(netdev)) {
bool flow_rx, flow_tx;
- if (hw->mac.type == ixgbe_mac_82599EB) {
- u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
- u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
- flow_rx = !!(mflcn & IXGBE_MFLCN_RFCE);
- flow_tx = !!(fccfg & IXGBE_FCCFG_TFCE_802_3X);
- } else {
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB: {
u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS);
flow_rx = !!(frctl & IXGBE_FCTRL_RFCE);
flow_tx = !!(rmcs & IXGBE_RMCS_TFCE_802_3X);
}
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540: {
+ u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
+ u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
+ flow_rx = !!(mflcn & IXGBE_MFLCN_RFCE);
+ flow_tx = !!(fccfg & IXGBE_FCCFG_TFCE_802_3X);
+ }
+ break;
+ default:
+ flow_tx = false;
+ flow_rx = false;
+ break;
+ }
e_info(drv, "NIC Link is Up %s, Flow Control: %s\n",
(link_speed == IXGBE_LINK_SPEED_10GB_FULL ?
@@ -5787,7 +6035,10 @@ static void ixgbe_watchdog_task(struct work_struct *work)
netif_carrier_on(netdev);
} else {
/* Force detection of hung controller */
- adapter->detect_tx_hung = true;
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ tx_ring = adapter->tx_ring[i];
+ set_check_for_tx_hang(tx_ring);
+ }
}
} else {
adapter->link_up = false;
@@ -5823,7 +6074,7 @@ static void ixgbe_watchdog_task(struct work_struct *work)
static int ixgbe_tso(struct ixgbe_adapter *adapter,
struct ixgbe_ring *tx_ring, struct sk_buff *skb,
- u32 tx_flags, u8 *hdr_len)
+ u32 tx_flags, u8 *hdr_len, __be16 protocol)
{
struct ixgbe_adv_tx_context_desc *context_desc;
unsigned int i;
@@ -5841,7 +6092,7 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter,
l4len = tcp_hdrlen(skb);
*hdr_len += l4len;
- if (skb->protocol == htons(ETH_P_IP)) {
+ if (protocol == htons(ETH_P_IP)) {
struct iphdr *iph = ip_hdr(skb);
iph->tot_len = 0;
iph->check = 0;
@@ -5880,7 +6131,7 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter,
type_tucmd_mlhl = (IXGBE_TXD_CMD_DEXT |
IXGBE_ADVTXD_DTYP_CTXT);
- if (skb->protocol == htons(ETH_P_IP))
+ if (protocol == htons(ETH_P_IP))
type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
@@ -5906,16 +6157,10 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter,
return false;
}
-static u32 ixgbe_psum(struct ixgbe_adapter *adapter, struct sk_buff *skb)
+static u32 ixgbe_psum(struct ixgbe_adapter *adapter, struct sk_buff *skb,
+ __be16 protocol)
{
u32 rtn = 0;
- __be16 protocol;
-
- if (skb->protocol == cpu_to_be16(ETH_P_8021Q))
- protocol = ((const struct vlan_ethhdr *)skb->data)->
- h_vlan_encapsulated_proto;
- else
- protocol = skb->protocol;
switch (protocol) {
case cpu_to_be16(ETH_P_IP):
@@ -5943,7 +6188,7 @@ static u32 ixgbe_psum(struct ixgbe_adapter *adapter, struct sk_buff *skb)
default:
if (unlikely(net_ratelimit()))
e_warn(probe, "partial checksum but proto=%x!\n",
- skb->protocol);
+ protocol);
break;
}
@@ -5952,7 +6197,8 @@ static u32 ixgbe_psum(struct ixgbe_adapter *adapter, struct sk_buff *skb)
static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
struct ixgbe_ring *tx_ring,
- struct sk_buff *skb, u32 tx_flags)
+ struct sk_buff *skb, u32 tx_flags,
+ __be16 protocol)
{
struct ixgbe_adv_tx_context_desc *context_desc;
unsigned int i;
@@ -5981,7 +6227,7 @@ static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
IXGBE_ADVTXD_DTYP_CTXT);
if (skb->ip_summed == CHECKSUM_PARTIAL)
- type_tucmd_mlhl |= ixgbe_psum(adapter, skb);
+ type_tucmd_mlhl |= ixgbe_psum(adapter, skb, protocol);
context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
/* use index zero for tx checksum offload */
@@ -6004,15 +6250,17 @@ static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
struct ixgbe_ring *tx_ring,
struct sk_buff *skb, u32 tx_flags,
- unsigned int first)
+ unsigned int first, const u8 hdr_len)
{
- struct pci_dev *pdev = adapter->pdev;
+ struct device *dev = tx_ring->dev;
struct ixgbe_tx_buffer *tx_buffer_info;
unsigned int len;
unsigned int total = skb->len;
unsigned int offset = 0, size, count = 0, i;
unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
unsigned int f;
+ unsigned int bytecount = skb->len;
+ u16 gso_segs = 1;
i = tx_ring->next_to_use;
@@ -6027,10 +6275,10 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
tx_buffer_info->length = size;
tx_buffer_info->mapped_as_page = false;
- tx_buffer_info->dma = dma_map_single(&pdev->dev,
+ tx_buffer_info->dma = dma_map_single(dev,
skb->data + offset,
size, DMA_TO_DEVICE);
- if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma))
+ if (dma_mapping_error(dev, tx_buffer_info->dma))
goto dma_error;
tx_buffer_info->time_stamp = jiffies;
tx_buffer_info->next_to_watch = i;
@@ -6063,12 +6311,12 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);
tx_buffer_info->length = size;
- tx_buffer_info->dma = dma_map_page(&adapter->pdev->dev,
+ tx_buffer_info->dma = dma_map_page(dev,
frag->page,
offset, size,
DMA_TO_DEVICE);
tx_buffer_info->mapped_as_page = true;
- if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma))
+ if (dma_mapping_error(dev, tx_buffer_info->dma))
goto dma_error;
tx_buffer_info->time_stamp = jiffies;
tx_buffer_info->next_to_watch = i;
@@ -6082,6 +6330,19 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
break;
}
+ if (tx_flags & IXGBE_TX_FLAGS_TSO)
+ gso_segs = skb_shinfo(skb)->gso_segs;
+#ifdef IXGBE_FCOE
+ /* adjust for FCoE Sequence Offload */
+ else if (tx_flags & IXGBE_TX_FLAGS_FSO)
+ gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
+ skb_shinfo(skb)->gso_size);
+#endif /* IXGBE_FCOE */
+ bytecount += (gso_segs - 1) * hdr_len;
+
+ /* multiply data chunks by size of headers */
+ tx_ring->tx_buffer_info[i].bytecount = bytecount;
+ tx_ring->tx_buffer_info[i].gso_segs = gso_segs;
tx_ring->tx_buffer_info[i].skb = skb;
tx_ring->tx_buffer_info[first].next_to_watch = i;
@@ -6103,14 +6364,13 @@ dma_error:
i += tx_ring->count;
i--;
tx_buffer_info = &tx_ring->tx_buffer_info[i];
- ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info);
+ ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
}
return 0;
}
-static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *tx_ring,
+static void ixgbe_tx_queue(struct ixgbe_ring *tx_ring,
int tx_flags, int count, u32 paylen, u8 hdr_len)
{
union ixgbe_adv_tx_desc *tx_desc = NULL;
@@ -6175,60 +6435,46 @@ static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
wmb();
tx_ring->next_to_use = i;
- writel(i, adapter->hw.hw_addr + tx_ring->tail);
+ writel(i, tx_ring->tail);
}
static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb,
- int queue, u32 tx_flags)
+ u8 queue, u32 tx_flags, __be16 protocol)
{
struct ixgbe_atr_input atr_input;
- struct tcphdr *th;
struct iphdr *iph = ip_hdr(skb);
struct ethhdr *eth = (struct ethhdr *)skb->data;
- u16 vlan_id, src_port, dst_port, flex_bytes;
- u32 src_ipv4_addr, dst_ipv4_addr;
- u8 l4type = 0;
+ struct tcphdr *th;
+ u16 vlan_id;
- /* Right now, we support IPv4 only */
- if (skb->protocol != htons(ETH_P_IP))
- return;
- /* check if we're UDP or TCP */
- if (iph->protocol == IPPROTO_TCP) {
- th = tcp_hdr(skb);
- src_port = th->source;
- dst_port = th->dest;
- l4type |= IXGBE_ATR_L4TYPE_TCP;
- /* l4type IPv4 type is 0, no need to assign */
- } else {
- /* Unsupported L4 header, just bail here */
+ /* Right now, we support IPv4 w/ TCP only */
+ if (protocol != htons(ETH_P_IP) ||
+ iph->protocol != IPPROTO_TCP)
return;
- }
memset(&atr_input, 0, sizeof(struct ixgbe_atr_input));
vlan_id = (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK) >>
IXGBE_TX_FLAGS_VLAN_SHIFT;
- src_ipv4_addr = iph->saddr;
- dst_ipv4_addr = iph->daddr;
- flex_bytes = eth->h_proto;
+
+ th = tcp_hdr(skb);
ixgbe_atr_set_vlan_id_82599(&atr_input, vlan_id);
- ixgbe_atr_set_src_port_82599(&atr_input, dst_port);
- ixgbe_atr_set_dst_port_82599(&atr_input, src_port);
- ixgbe_atr_set_flex_byte_82599(&atr_input, flex_bytes);
- ixgbe_atr_set_l4type_82599(&atr_input, l4type);
+ ixgbe_atr_set_src_port_82599(&atr_input, th->dest);
+ ixgbe_atr_set_dst_port_82599(&atr_input, th->source);
+ ixgbe_atr_set_flex_byte_82599(&atr_input, eth->h_proto);
+ ixgbe_atr_set_l4type_82599(&atr_input, IXGBE_ATR_L4TYPE_TCP);
/* src and dst are inverted, think how the receiver sees them */
- ixgbe_atr_set_src_ipv4_82599(&atr_input, dst_ipv4_addr);
- ixgbe_atr_set_dst_ipv4_82599(&atr_input, src_ipv4_addr);
+ ixgbe_atr_set_src_ipv4_82599(&atr_input, iph->daddr);
+ ixgbe_atr_set_dst_ipv4_82599(&atr_input, iph->saddr);
/* This assumes the Rx queue and Tx queue are bound to the same CPU */
ixgbe_fdir_add_signature_filter_82599(&adapter->hw, &atr_input, queue);
}
-static int __ixgbe_maybe_stop_tx(struct net_device *netdev,
- struct ixgbe_ring *tx_ring, int size)
+static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, int size)
{
- netif_stop_subqueue(netdev, tx_ring->queue_index);
+ netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
/* Herbert's original patch had:
* smp_mb__after_netif_stop_queue();
* but since that doesn't exist yet, just open code it. */
@@ -6240,27 +6486,29 @@ static int __ixgbe_maybe_stop_tx(struct net_device *netdev,
return -EBUSY;
/* A reprieve! - use start_queue because it doesn't call schedule */
- netif_start_subqueue(netdev, tx_ring->queue_index);
- ++tx_ring->restart_queue;
+ netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
+ ++tx_ring->tx_stats.restart_queue;
return 0;
}
-static int ixgbe_maybe_stop_tx(struct net_device *netdev,
- struct ixgbe_ring *tx_ring, int size)
+static int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, int size)
{
if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
return 0;
- return __ixgbe_maybe_stop_tx(netdev, tx_ring, size);
+ return __ixgbe_maybe_stop_tx(tx_ring, size);
}
static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
int txq = smp_processor_id();
-
#ifdef IXGBE_FCOE
- if ((skb->protocol == htons(ETH_P_FCOE)) ||
- (skb->protocol == htons(ETH_P_FIP))) {
+ __be16 protocol;
+
+ protocol = vlan_get_protocol(skb);
+
+ if ((protocol == htons(ETH_P_FCOE)) ||
+ (protocol == htons(ETH_P_FIP))) {
if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
txq &= (adapter->ring_feature[RING_F_FCOE].indices - 1);
txq += adapter->ring_feature[RING_F_FCOE].mask;
@@ -6292,10 +6540,11 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
return skb_tx_hash(dev, skb);
}
-netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev,
+netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
struct ixgbe_adapter *adapter,
struct ixgbe_ring *tx_ring)
{
+ struct net_device *netdev = tx_ring->netdev;
struct netdev_queue *txq;
unsigned int first;
unsigned int tx_flags = 0;
@@ -6303,6 +6552,9 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev
int tso;
int count = 0;
unsigned int f;
+ __be16 protocol;
+
+ protocol = vlan_get_protocol(skb);
if (vlan_tx_tag_present(skb)) {
tx_flags |= vlan_tx_tag_get(skb);
@@ -6323,8 +6575,8 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev
/* for FCoE with DCB, we force the priority to what
* was specified by the switch */
if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED &&
- (skb->protocol == htons(ETH_P_FCOE) ||
- skb->protocol == htons(ETH_P_FIP))) {
+ (protocol == htons(ETH_P_FCOE) ||
+ protocol == htons(ETH_P_FIP))) {
#ifdef CONFIG_IXGBE_DCB
if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
tx_flags &= ~(IXGBE_TX_FLAGS_VLAN_PRIO_MASK
@@ -6334,7 +6586,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev
}
#endif
/* flag for FCoE offloads */
- if (skb->protocol == htons(ETH_P_FCOE))
+ if (protocol == htons(ETH_P_FCOE))
tx_flags |= IXGBE_TX_FLAGS_FCOE;
}
#endif
@@ -6350,8 +6602,8 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev
for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
- if (ixgbe_maybe_stop_tx(netdev, tx_ring, count)) {
- adapter->tx_busy++;
+ if (ixgbe_maybe_stop_tx(tx_ring, count)) {
+ tx_ring->tx_stats.tx_busy++;
return NETDEV_TX_BUSY;
}
@@ -6368,9 +6620,10 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev
tx_flags |= IXGBE_TX_FLAGS_FSO;
#endif /* IXGBE_FCOE */
} else {
- if (skb->protocol == htons(ETH_P_IP))
+ if (protocol == htons(ETH_P_IP))
tx_flags |= IXGBE_TX_FLAGS_IPV4;
- tso = ixgbe_tso(adapter, tx_ring, skb, tx_flags, &hdr_len);
+ tso = ixgbe_tso(adapter, tx_ring, skb, tx_flags, &hdr_len,
+ protocol);
if (tso < 0) {
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
@@ -6378,30 +6631,30 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev
if (tso)
tx_flags |= IXGBE_TX_FLAGS_TSO;
- else if (ixgbe_tx_csum(adapter, tx_ring, skb, tx_flags) &&
+ else if (ixgbe_tx_csum(adapter, tx_ring, skb, tx_flags,
+ protocol) &&
(skb->ip_summed == CHECKSUM_PARTIAL))
tx_flags |= IXGBE_TX_FLAGS_CSUM;
}
- count = ixgbe_tx_map(adapter, tx_ring, skb, tx_flags, first);
+ count = ixgbe_tx_map(adapter, tx_ring, skb, tx_flags, first, hdr_len);
if (count) {
/* add the ATR filter if ATR is on */
if (tx_ring->atr_sample_rate) {
++tx_ring->atr_count;
if ((tx_ring->atr_count >= tx_ring->atr_sample_rate) &&
- test_bit(__IXGBE_FDIR_INIT_DONE,
- &tx_ring->reinit_state)) {
+ test_bit(__IXGBE_TX_FDIR_INIT_DONE,
+ &tx_ring->state)) {
ixgbe_atr(adapter, skb, tx_ring->queue_index,
- tx_flags);
+ tx_flags, protocol);
tx_ring->atr_count = 0;
}
}
txq = netdev_get_tx_queue(netdev, tx_ring->queue_index);
txq->tx_bytes += skb->len;
txq->tx_packets++;
- ixgbe_tx_queue(adapter, tx_ring, tx_flags, count, skb->len,
- hdr_len);
- ixgbe_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED);
+ ixgbe_tx_queue(tx_ring, tx_flags, count, skb->len, hdr_len);
+ ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED);
} else {
dev_kfree_skb_any(skb);
@@ -6418,7 +6671,7 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netd
struct ixgbe_ring *tx_ring;
tx_ring = adapter->tx_ring[skb->queue_mapping];
- return ixgbe_xmit_frame_ring(skb, netdev, adapter, tx_ring);
+ return ixgbe_xmit_frame_ring(skb, adapter, tx_ring);
}
/**
@@ -6559,20 +6812,23 @@ static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev,
/* accurate rx/tx bytes/packets stats */
dev_txq_stats_fold(netdev, stats);
+ rcu_read_lock();
for (i = 0; i < adapter->num_rx_queues; i++) {
- struct ixgbe_ring *ring = adapter->rx_ring[i];
+ struct ixgbe_ring *ring = ACCESS_ONCE(adapter->rx_ring[i]);
u64 bytes, packets;
unsigned int start;
- do {
- start = u64_stats_fetch_begin_bh(&ring->syncp);
- packets = ring->stats.packets;
- bytes = ring->stats.bytes;
- } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
- stats->rx_packets += packets;
- stats->rx_bytes += bytes;
+ if (ring) {
+ do {
+ start = u64_stats_fetch_begin_bh(&ring->syncp);
+ packets = ring->stats.packets;
+ bytes = ring->stats.bytes;
+ } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
+ stats->rx_packets += packets;
+ stats->rx_bytes += bytes;
+ }
}
-
+ rcu_read_unlock();
/* following stats updated by ixgbe_watchdog_task() */
stats->multicast = netdev->stats.multicast;
stats->rx_errors = netdev->stats.rx_errors;
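The retry loop above is the reader half of the u64_stats_sync protocol; the writer half lives in the Rx/Tx clean paths (not shown in this diff) and brackets its counter updates so a 32-bit reader can detect a torn 64-bit read. A hedged sketch of that writer side:

	/* in the ring clean path, once the totals are known */
	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->stats.packets += total_rx_packets;
	rx_ring->stats.bytes += total_rx_bytes;
	u64_stats_update_end(&rx_ring->syncp);

On 64-bit kernels both halves compile away to plain loads and stores; the seqcount only exists where a 64-bit counter cannot be read atomically.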
@@ -6754,8 +7010,8 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
SET_NETDEV_DEV(netdev, &pdev->dev);
- pci_set_drvdata(pdev, netdev);
adapter = netdev_priv(netdev);
+ pci_set_drvdata(pdev, adapter);
adapter->netdev = netdev;
adapter->pdev = pdev;
@@ -6828,8 +7084,14 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
goto err_sw_init;
/* Make it possible the adapter to be woken up via WOL */
- if (adapter->hw.mac.type == ixgbe_mac_82599EB)
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
+ break;
+ default:
+ break;
+ }
/*
* If there is a fan on this device and it has failed log the
@@ -6938,7 +7200,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
}
/* power down the optics */
- if (hw->phy.multispeed_fiber)
+ if (hw->phy.multispeed_fiber && hw->mac.ops.disable_tx_laser)
hw->mac.ops.disable_tx_laser(hw);
init_timer(&adapter->watchdog_timer);
@@ -6953,6 +7215,13 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
goto err_sw_init;
switch (pdev->device) {
+ case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
+ /* All except this subdevice support WOL */
+ if (pdev->subsystem_device ==
+ IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ) {
+ adapter->wol = 0;
+ break;
+ }
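+ /* fall through */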
case IXGBE_DEV_ID_82599_KX4:
adapter->wol = (IXGBE_WUFC_MAG | IXGBE_WUFC_EX |
IXGBE_WUFC_MC | IXGBE_WUFC_BC);
@@ -7078,8 +7347,8 @@ err_dma:
**/
static void __devexit ixgbe_remove(struct pci_dev *pdev)
{
- struct net_device *netdev = pci_get_drvdata(pdev);
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
+ struct net_device *netdev = adapter->netdev;
set_bit(__IXGBE_DOWN, &adapter->state);
/* clear the module not found bit to make sure the worker won't
@@ -7149,8 +7418,8 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
pci_channel_state_t state)
{
- struct net_device *netdev = pci_get_drvdata(pdev);
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
+ struct net_device *netdev = adapter->netdev;
netif_device_detach(netdev);
@@ -7173,8 +7442,7 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
*/
static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
{
- struct net_device *netdev = pci_get_drvdata(pdev);
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
pci_ers_result_t result;
int err;
@@ -7212,8 +7480,8 @@ static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
*/
static void ixgbe_io_resume(struct pci_dev *pdev)
{
- struct net_device *netdev = pci_get_drvdata(pdev);
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
+ struct net_device *netdev = adapter->netdev;
if (netif_running(netdev)) {
if (ixgbe_up(adapter)) {
@@ -7278,6 +7546,7 @@ static void __exit ixgbe_exit_module(void)
dca_unregister_notify(&dca_notifier);
#endif
pci_unregister_driver(&ixgbe_driver);
+ rcu_barrier(); /* Wait for completion of call_rcu()'s */
}
#ifdef CONFIG_IXGBE_DCA
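The RCU pieces in this file now form one complete lifecycle: ixgbe_get_stats64() dereferences rx_ring[i] under rcu_read_lock(), ixgbe_clear_interrupt_scheme() defers the kfree() through call_rcu() until those readers are done, and the exit path above runs rcu_barrier() so no callback can fire after the module text is gone. Distilled to its essentials (a generic sketch, not driver code):

	struct item {
		struct rcu_head rcu;
		u64 counter;
	};

	static struct item __rcu *slot;

	static void item_free_rcu(struct rcu_head *head)
	{
		kfree(container_of(head, struct item, rcu));
	}

	u64 read_counter(void)		/* reader: lockless, never blocks */
	{
		struct item *p;
		u64 val = 0;

		rcu_read_lock();
		p = rcu_dereference(slot);
		if (p)
			val = p->counter;
		rcu_read_unlock();
		return val;
	}

	void retire_item(void)		/* updater: unpublish, free later */
	{
		struct item *p = rcu_dereference_protected(slot, 1);

		rcu_assign_pointer(slot, NULL);
		if (p)
			call_rcu(&p->rcu, item_free_rcu);
	}

Any module that uses call_rcu() needs the matching rcu_barrier() on unload, which is exactly what the hunk above adds.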
diff --git a/drivers/net/ixgbe/ixgbe_mbx.c b/drivers/net/ixgbe/ixgbe_mbx.c
index 471f0f2cdb98..027c628c3aae 100644
--- a/drivers/net/ixgbe/ixgbe_mbx.c
+++ b/drivers/net/ixgbe/ixgbe_mbx.c
@@ -319,8 +319,14 @@ static s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number)
u32 vflre = 0;
s32 ret_val = IXGBE_ERR_MBX;
- if (hw->mac.type == ixgbe_mac_82599EB)
+ switch (hw->mac.type) {
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
vflre = IXGBE_READ_REG(hw, IXGBE_VFLRE(reg_offset));
+ break;
+ default:
+ break;
+ }
if (vflre & (1 << vf_shift)) {
ret_val = 0;
@@ -439,22 +445,26 @@ void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
- if (hw->mac.type != ixgbe_mac_82599EB)
- return;
-
- mbx->timeout = 0;
- mbx->usec_delay = 0;
-
- mbx->size = IXGBE_VFMAILBOX_SIZE;
-
- mbx->stats.msgs_tx = 0;
- mbx->stats.msgs_rx = 0;
- mbx->stats.reqs = 0;
- mbx->stats.acks = 0;
- mbx->stats.rsts = 0;
+ switch (hw->mac.type) {
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ mbx->timeout = 0;
+ mbx->usec_delay = 0;
+
+ mbx->size = IXGBE_VFMAILBOX_SIZE;
+
+ mbx->stats.msgs_tx = 0;
+ mbx->stats.msgs_rx = 0;
+ mbx->stats.reqs = 0;
+ mbx->stats.acks = 0;
+ mbx->stats.rsts = 0;
+ break;
+ default:
+ break;
+ }
}
-struct ixgbe_mbx_operations mbx_ops_82599 = {
+struct ixgbe_mbx_operations mbx_ops_generic = {
.read = ixgbe_read_mbx_pf,
.write = ixgbe_write_mbx_pf,
.read_posted = ixgbe_read_posted_mbx,
diff --git a/drivers/net/ixgbe/ixgbe_mbx.h b/drivers/net/ixgbe/ixgbe_mbx.h
index 7e0d08ff5b53..3df9b1590218 100644
--- a/drivers/net/ixgbe/ixgbe_mbx.h
+++ b/drivers/net/ixgbe/ixgbe_mbx.h
@@ -88,6 +88,6 @@ s32 ixgbe_check_for_ack(struct ixgbe_hw *, u16);
s32 ixgbe_check_for_rst(struct ixgbe_hw *, u16);
void ixgbe_init_mbx_params_pf(struct ixgbe_hw *);
-extern struct ixgbe_mbx_operations mbx_ops_82599;
+extern struct ixgbe_mbx_operations mbx_ops_generic;
#endif /* _IXGBE_MBX_H_ */
diff --git a/drivers/net/ixgbe/ixgbe_phy.c b/drivers/net/ixgbe/ixgbe_phy.c
index 6c0d42e33f21..c445fbce56ee 100644
--- a/drivers/net/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ixgbe/ixgbe_phy.c
@@ -115,6 +115,9 @@ static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id)
case TN1010_PHY_ID:
phy_type = ixgbe_phy_tn;
break;
+ case AQ1202_PHY_ID:
+ phy_type = ixgbe_phy_aq;
+ break;
case QT2022_PHY_ID:
phy_type = ixgbe_phy_qt;
break;
@@ -425,6 +428,39 @@ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
}
/**
+ * ixgbe_get_copper_link_capabilities_generic - Determines link capabilities
+ * @hw: pointer to hardware structure
+ * @speed: pointer to link speed
+ * @autoneg: boolean auto-negotiation value
+ *
+ * Determines the link capabilities by reading the PMA/PMD speed
+ * ability register of the PHY.
+ */
+s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed,
+ bool *autoneg)
+{
+ s32 status = IXGBE_ERR_LINK_SETUP;
+ u16 speed_ability;
+
+ *speed = 0;
+ *autoneg = true;
+
+ status = hw->phy.ops.read_reg(hw, MDIO_SPEED, MDIO_MMD_PMAPMD,
+ &speed_ability);
+
+ if (status == 0) {
+ if (speed_ability & MDIO_SPEED_10G)
+ *speed |= IXGBE_LINK_SPEED_10GB_FULL;
+ if (speed_ability & MDIO_PMA_SPEED_1000)
+ *speed |= IXGBE_LINK_SPEED_1GB_FULL;
+ if (speed_ability & MDIO_PMA_SPEED_100)
+ *speed |= IXGBE_LINK_SPEED_100_FULL;
+ }
+
+ return status;
+}
+
+/**
* ixgbe_reset_phy_nl - Performs a PHY reset
* @hw: pointer to hardware structure
**/
@@ -1378,6 +1414,22 @@ s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw,
}
/**
+ * ixgbe_get_phy_firmware_version_generic - Gets the PHY Firmware Version
+ * @hw: pointer to hardware structure
+ * @firmware_version: pointer to the PHY Firmware Version
+**/
+s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw,
+ u16 *firmware_version)
+{
+ s32 status = 0;
+
+ status = hw->phy.ops.read_reg(hw, AQ_FW_REV, MDIO_MMD_VEND1,
+ firmware_version);
+
+ return status;
+}
+
+/**
* ixgbe_tn_check_overtemp - Checks if an overtemp occurred.
* @hw: pointer to hardware structure
*
diff --git a/drivers/net/ixgbe/ixgbe_phy.h b/drivers/net/ixgbe/ixgbe_phy.h
index fb3898f12fc5..e2c6b7eac641 100644
--- a/drivers/net/ixgbe/ixgbe_phy.h
+++ b/drivers/net/ixgbe/ixgbe_phy.h
@@ -96,6 +96,9 @@ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg,
bool autoneg_wait_to_complete);
+s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed,
+ bool *autoneg);
/* PHY specific */
s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw,
@@ -103,6 +106,8 @@ s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw,
bool *link_up);
s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw,
u16 *firmware_version);
+s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw,
+ u16 *firmware_version);
s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw);
s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw);
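Callers reach the new helper through mac.ops.get_link_capabilities and then test the returned speed mask; a hedged usage sketch (assuming the ops hook carries the usual ixgbe signature):

	ixgbe_link_speed speed;
	bool autoneg;

	if (!hw->mac.ops.get_link_capabilities(hw, &speed, &autoneg)) {
		if (speed & IXGBE_LINK_SPEED_10GB_FULL)
			pr_info("10GBase-T capable\n");
		if (speed & IXGBE_LINK_SPEED_1GB_FULL)
			pr_info("1000Base-T capable\n");
	}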
diff --git a/drivers/net/ixgbe/ixgbe_sriov.c b/drivers/net/ixgbe/ixgbe_sriov.c
index 5428153af8f3..6e3e94b5a5f6 100644
--- a/drivers/net/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ixgbe/ixgbe_sriov.c
@@ -68,7 +68,7 @@ static int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
* addresses
*/
for (i = 0; i < entries; i++) {
- vfinfo->vf_mc_hashes[i] = hash_list[i];;
+ vfinfo->vf_mc_hashes[i] = hash_list[i];
}
for (i = 0; i < vfinfo->num_vf_mc_hashes; i++) {
@@ -178,8 +178,7 @@ static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask)
{
unsigned char vf_mac_addr[6];
- struct net_device *netdev = pci_get_drvdata(pdev);
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
unsigned int vfn = (event_mask & 0x3f);
bool enable = ((event_mask & 0x10000000U) != 0);
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
index d3cc6ce7c973..42c607339a62 100644
--- a/drivers/net/ixgbe/ixgbe_type.h
+++ b/drivers/net/ixgbe/ixgbe_type.h
@@ -57,6 +57,8 @@
#define IXGBE_DEV_ID_82599_SFP_EM 0x1507
#define IXGBE_DEV_ID_82599_XAUI_LOM 0x10FC
#define IXGBE_DEV_ID_82599_COMBO_BACKPLANE 0x10F8
+#define IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ 0x000C
+#define IXGBE_DEV_ID_X540T 0x1528
/* General Registers */
#define IXGBE_CTRL 0x00000
@@ -994,8 +996,10 @@
/* PHY IDs*/
#define TN1010_PHY_ID 0x00A19410
#define TNX_FW_REV 0xB
+#define AQ1202_PHY_ID 0x03A1B440
#define QT2022_PHY_ID 0x0043A400
#define ATH_PHY_ID 0x03429050
+#define AQ_FW_REV 0x20
/* PHY Types */
#define IXGBE_M88E1145_E_PHY_ID 0x01410CD0
@@ -1491,6 +1495,7 @@
#define IXGBE_EEC_PRES 0x00000100 /* EEPROM Present */
#define IXGBE_EEC_ARD 0x00000200 /* EEPROM Auto Read Done */
#define IXGBE_EEC_FLUP 0x00800000 /* Flash update command */
+#define IXGBE_EEC_SEC1VAL 0x02000000 /* Sector 1 Valid */
#define IXGBE_EEC_FLUDONE 0x04000000 /* Flash update done */
/* EEPROM Addressing bits based on type (0-small, 1-large) */
#define IXGBE_EEC_ADDR_SIZE 0x00000400
@@ -1505,7 +1510,9 @@
#define IXGBE_EEPROM_SUM 0xBABA
#define IXGBE_PCIE_ANALOG_PTR 0x03
#define IXGBE_ATLAS0_CONFIG_PTR 0x04
+#define IXGBE_PHY_PTR 0x04
#define IXGBE_ATLAS1_CONFIG_PTR 0x05
+#define IXGBE_OPTION_ROM_PTR 0x05
#define IXGBE_PCIE_GENERAL_PTR 0x06
#define IXGBE_PCIE_CONFIG0_PTR 0x07
#define IXGBE_PCIE_CONFIG1_PTR 0x08
@@ -2113,6 +2120,14 @@ typedef u32 ixgbe_physical_layer;
#define IXGBE_PHYSICAL_LAYER_10GBASE_XAUI 0x1000
#define IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA 0x2000
+/* Flow Control Macros */
+#define PAUSE_RTT 8
+#define PAUSE_MTU(MTU) (((MTU) + 1024 - 1) / 1024)
+
+#define FC_HIGH_WATER(MTU) ((((PAUSE_RTT + PAUSE_MTU(MTU)) * 144) + 99) / 100 +\
+ PAUSE_MTU(MTU))
+#define FC_LOW_WATER(MTU) (2 * (2 * PAUSE_MTU(MTU) + PAUSE_RTT))
+
/* Software ATR hash keys */
#define IXGBE_ATR_BUCKET_HASH_KEY 0xE214AD3D
#define IXGBE_ATR_SIGNATURE_HASH_KEY 0x14364D17
@@ -2164,6 +2179,7 @@ struct ixgbe_atr_input_masks {
enum ixgbe_eeprom_type {
ixgbe_eeprom_uninitialized = 0,
ixgbe_eeprom_spi,
+ ixgbe_flash,
ixgbe_eeprom_none /* No NVM support */
};
@@ -2171,12 +2187,14 @@ enum ixgbe_mac_type {
ixgbe_mac_unknown = 0,
ixgbe_mac_82598EB,
ixgbe_mac_82599EB,
+ ixgbe_mac_X540,
ixgbe_num_macs
};
enum ixgbe_phy_type {
ixgbe_phy_unknown = 0,
ixgbe_phy_tn,
+ ixgbe_phy_aq,
ixgbe_phy_cu_unknown,
ixgbe_phy_qt,
ixgbe_phy_xaui,
@@ -2405,6 +2423,7 @@ struct ixgbe_eeprom_operations {
s32 (*write)(struct ixgbe_hw *, u16, u16);
s32 (*validate_checksum)(struct ixgbe_hw *, u16 *);
s32 (*update_checksum)(struct ixgbe_hw *);
+ u16 (*calc_checksum)(struct ixgbe_hw *);
};
struct ixgbe_mac_operations {
@@ -2574,6 +2593,7 @@ struct ixgbe_hw {
u16 subsystem_vendor_id;
u8 revision_id;
bool adapter_stopped;
+ bool force_full_reset;
};
struct ixgbe_info {
diff --git a/drivers/net/ixgbe/ixgbe_x540.c b/drivers/net/ixgbe/ixgbe_x540.c
new file mode 100644
index 000000000000..9649fa727e31
--- /dev/null
+++ b/drivers/net/ixgbe/ixgbe_x540.c
@@ -0,0 +1,722 @@
+/*******************************************************************************
+
+ Intel 10 Gigabit PCI Express Linux driver
+ Copyright(c) 1999 - 2010 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+
+#include "ixgbe.h"
+#include "ixgbe_phy.h"
+#include "ixgbe_mbx.h"
+
+#define IXGBE_X540_MAX_TX_QUEUES 128
+#define IXGBE_X540_MAX_RX_QUEUES 128
+#define IXGBE_X540_RAR_ENTRIES 128
+#define IXGBE_X540_MC_TBL_SIZE 128
+#define IXGBE_X540_VFT_TBL_SIZE 128
+
+static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw);
+static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw);
+static s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask);
+static void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask);
+static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw);
+static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw);
+
+static enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw)
+{
+ return ixgbe_media_type_copper;
+}
+
+static s32 ixgbe_get_invariants_X540(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mac_info *mac = &hw->mac;
+
+ /* Call PHY identify routine to get the phy type */
+ ixgbe_identify_phy_generic(hw);
+
+ mac->mcft_size = IXGBE_X540_MC_TBL_SIZE;
+ mac->vft_size = IXGBE_X540_VFT_TBL_SIZE;
+ mac->num_rar_entries = IXGBE_X540_RAR_ENTRIES;
+ mac->max_rx_queues = IXGBE_X540_MAX_RX_QUEUES;
+ mac->max_tx_queues = IXGBE_X540_MAX_TX_QUEUES;
+ mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);
+
+ return 0;
+}
+
+/**
+ * ixgbe_setup_mac_link_X540 - Set the auto advertised capabilities
+ * @hw: pointer to hardware structure
+ * @speed: new link speed
+ * @autoneg: true if autonegotiation enabled
+ * @autoneg_wait_to_complete: true when waiting for completion is needed
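+ *
+ * On this copper-only device the requested speeds are simply handed to the
+ * PHY's setup_link_speed operation, which does all the work.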
+ **/
+static s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed, bool autoneg,
+ bool autoneg_wait_to_complete)
+{
+ return hw->phy.ops.setup_link_speed(hw, speed, autoneg,
+ autoneg_wait_to_complete);
+}
+
+/**
+ * ixgbe_reset_hw_X540 - Perform hardware reset
+ * @hw: pointer to hardware structure
+ *
+ * Resets the hardware by resetting the transmit and receive units, masks
+ * and clears all interrupts, performs a PHY reset, and performs a link
+ * (MAC) reset.
+ **/
+static s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw)
+{
+ ixgbe_link_speed link_speed;
+ s32 status = 0;
+ u32 ctrl;
+ u32 ctrl_ext;
+ u32 reset_bit;
+ u32 i;
+ u32 autoc;
+ u32 autoc2;
+ bool link_up = false;
+
+ /* Call adapter stop to disable tx/rx and clear interrupts */
+ hw->mac.ops.stop_adapter(hw);
+
+ /*
+ * Prevent the PCI-E bus from hanging by disabling PCI-E master
+ * access and verifying no pending requests exist before the reset
+ */
+ status = ixgbe_disable_pcie_master(hw);
+ if (status != 0) {
+ status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
+ hw_dbg(hw, "PCI-E Master disable polling has failed.\n");
+ }
+
+ /*
+ * Issue global reset to the MAC. Needs to be SW reset if link is up.
+ * If link reset is used when link is up, it might reset the PHY when
+ * mng is using it. If link is down or the flag to force full link
+ * reset is set, then perform link reset.
+ */
+ if (hw->force_full_reset) {
+ reset_bit = IXGBE_CTRL_LNK_RST;
+ } else {
+ hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
+ if (!link_up)
+ reset_bit = IXGBE_CTRL_LNK_RST;
+ else
+ reset_bit = IXGBE_CTRL_RST;
+ }
+
+ ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
+ IXGBE_WRITE_REG(hw, IXGBE_CTRL, (ctrl | reset_bit));
+ IXGBE_WRITE_FLUSH(hw);
+
+ /* Poll for reset bit to self-clear indicating reset is complete */
+ for (i = 0; i < 10; i++) {
+ udelay(1);
+ ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
+ if (!(ctrl & reset_bit))
+ break;
+ }
+ if (ctrl & reset_bit) {
+ status = IXGBE_ERR_RESET_FAILED;
+ hw_dbg(hw, "Reset polling failed to complete.\n");
+ }
+
+ /* Clear PF Reset Done bit so PF/VF Mail Ops can work */
+ ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
+ ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
+ IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
+
+ msleep(50);
+
+ /* Set the Rx packet buffer size. */
+ IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0), 384 << IXGBE_RXPBSIZE_SHIFT);
+
+ /*
+ * Store the original AUTOC/AUTOC2 values if they have not been
+ * stored off yet. Otherwise restore the stored original
+ * values since the reset operation sets back to defaults.
+ */
+ autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
+ if (hw->mac.orig_link_settings_stored == false) {
+ hw->mac.orig_autoc = autoc;
+ hw->mac.orig_autoc2 = autoc2;
+ hw->mac.orig_link_settings_stored = true;
+ } else {
+ if (autoc != hw->mac.orig_autoc)
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (hw->mac.orig_autoc |
+ IXGBE_AUTOC_AN_RESTART));
+
+ if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) !=
+ (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) {
+ autoc2 &= ~IXGBE_AUTOC2_UPPER_MASK;
+ autoc2 |= (hw->mac.orig_autoc2 &
+ IXGBE_AUTOC2_UPPER_MASK);
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
+ }
+ }
+
+ /*
+ * Store MAC address from RAR0, clear receive address registers, and
+ * clear the multicast table. Also reset num_rar_entries to 128,
+ * since we modify this value when programming the SAN MAC address.
+ */
+ hw->mac.num_rar_entries = 128;
+ hw->mac.ops.init_rx_addrs(hw);
+
+ /* Store the permanent mac address */
+ hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
+
+ /* Store the permanent SAN mac address */
+ hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);
+
+ /* Add the SAN MAC address to the RAR only if it's a valid address */
+ if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) {
+ hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
+ hw->mac.san_addr, 0, IXGBE_RAH_AV);
+
+ /* Reserve the last RAR for the SAN MAC address */
+ hw->mac.num_rar_entries--;
+ }
+
+ /* Store the alternative WWNN/WWPN prefix */
+ hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
+ &hw->mac.wwpn_prefix);
+
+ return status;
+}
+
+/**
+ * ixgbe_get_supported_physical_layer_X540 - Returns physical layer type
+ * @hw: pointer to hardware structure
+ *
+ * Determines physical layer capabilities of the current configuration.
+ **/
+static u32 ixgbe_get_supported_physical_layer_X540(struct ixgbe_hw *hw)
+{
+ u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
+ u16 ext_ability = 0;
+
+ hw->phy.ops.identify(hw);
+
+ hw->phy.ops.read_reg(hw, MDIO_PMA_EXTABLE, MDIO_MMD_PMAPMD,
+ &ext_ability);
+ if (ext_ability & MDIO_PMA_EXTABLE_10GBT)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
+ if (ext_ability & MDIO_PMA_EXTABLE_1000BT)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
+ if (ext_ability & MDIO_PMA_EXTABLE_100BTX)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
+
+ return physical_layer;
+}
+
+/**
+ * ixgbe_init_eeprom_params_X540 - Initialize EEPROM params
+ * @hw: pointer to hardware structure
+ **/
+static s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw)
+{
+ struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
+ u32 eec;
+ u16 eeprom_size;
+
+ if (eeprom->type == ixgbe_eeprom_uninitialized) {
+ eeprom->semaphore_delay = 10;
+ eeprom->type = ixgbe_flash;
+
+ eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+ eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
+ IXGBE_EEC_SIZE_SHIFT);
+ eeprom->word_size = 1 << (eeprom_size +
+ IXGBE_EEPROM_WORD_SIZE_SHIFT);
+
+ hw_dbg(hw, "Eeprom params: type = %d, size = %d\n",
+ eeprom->type, eeprom->word_size);
+ }
+
+ return 0;
+}
+
+/**
+ * ixgbe_read_eerd_X540 - Read EEPROM word using EERD
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to read
+ * @data: word read from the EEPROM
+ **/
+static s32 ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data)
+{
+ s32 status;
+
+ if (ixgbe_acquire_swfw_sync_X540(hw, IXGBE_GSSR_EEP_SM) == 0) {
+ status = ixgbe_read_eerd_generic(hw, offset, data);
+ ixgbe_release_swfw_sync_X540(hw, IXGBE_GSSR_EEP_SM);
+ } else {
+ status = IXGBE_ERR_SWFW_SYNC;
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_write_eewr_X540 - Write EEPROM word using EEWR
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to write
+ * @data: word to write to the EEPROM
+ *
+ * Write a 16 bit word to the EEPROM using the EEWR register.
+ **/
+static s32 ixgbe_write_eewr_X540(struct ixgbe_hw *hw, u16 offset, u16 data)
+{
+ u32 eewr;
+ s32 status;
+
+ hw->eeprom.ops.init_params(hw);
+
+ if (offset >= hw->eeprom.word_size)
+ return IXGBE_ERR_EEPROM;
+
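+ /*
+ * EEWR packs the word address and data around the START bit (layout
+ * per the shift macros used below); DONE is polled before and after
+ * the write so an in-flight cycle is never clobbered.
+ */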
+ eewr = (offset << IXGBE_EEPROM_RW_ADDR_SHIFT) |
+ (data << IXGBE_EEPROM_RW_REG_DATA) |
+ IXGBE_EEPROM_RW_REG_START;
+
+ if (ixgbe_acquire_swfw_sync_X540(hw, IXGBE_GSSR_EEP_SM) == 0) {
+ status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
+ if (status != 0) {
+ hw_dbg(hw, "Eeprom write EEWR timed out\n");
+ goto out;
+ }
+
+ IXGBE_WRITE_REG(hw, IXGBE_EEWR, eewr);
+
+ status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
+ if (status != 0) {
+ hw_dbg(hw, "Eeprom write EEWR timed out\n");
+ goto out;
+ }
+ } else {
+ return IXGBE_ERR_SWFW_SYNC;
+ }
+
+out:
+ ixgbe_release_swfw_sync_X540(hw, IXGBE_GSSR_EEP_SM);
+ return status;
+}
+
+/**
+ * ixgbe_calc_eeprom_checksum_X540 - Calculates and returns the checksum
+ * @hw: pointer to hardware structure
+ **/
+static u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
+{
+ u16 i;
+ u16 j;
+ u16 checksum = 0;
+ u16 length = 0;
+ u16 pointer = 0;
+ u16 word = 0;
+
+ /* Include 0x0-0x3F in the checksum */
+ for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
+ if (hw->eeprom.ops.read(hw, i, &word) != 0) {
+ hw_dbg(hw, "EEPROM read failed\n");
+ break;
+ }
+ checksum += word;
+ }
+
+ /*
+ * Include all data from pointers 0x3, 0x6-0xE. This excludes the
+ * FW, PHY module, and PCIe Expansion/Option ROM pointers.
+ */
+ for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) {
+ if (i == IXGBE_PHY_PTR || i == IXGBE_OPTION_ROM_PTR)
+ continue;
+
+ if (hw->eeprom.ops.read(hw, i, &pointer) != 0) {
+ hw_dbg(hw, "EEPROM read failed\n");
+ break;
+ }
+
+ /* Skip pointer section if the pointer is invalid. */
+ if (pointer == 0xFFFF || pointer == 0 ||
+ pointer >= hw->eeprom.word_size)
+ continue;
+
+ if (hw->eeprom.ops.read(hw, pointer, &length) != 0) {
+ hw_dbg(hw, "EEPROM read failed\n");
+ break;
+ }
+
+ /* Skip pointer section if length is invalid. */
+ if (length == 0xFFFF || length == 0 ||
+ (pointer + length) >= hw->eeprom.word_size)
+ continue;
+
+ for (j = pointer+1; j <= pointer+length; j++) {
+ if (hw->eeprom.ops.read(hw, j, &word) != 0) {
+ hw_dbg(hw, "EEPROM read failed\n");
+ break;
+ }
+ checksum += word;
+ }
+ }
+
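+ /* Return the value that makes all checksummed words sum to IXGBE_EEPROM_SUM */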
+ checksum = (u16)IXGBE_EEPROM_SUM - checksum;
+
+ return checksum;
+}
+
+/**
+ * ixgbe_update_eeprom_checksum_X540 - Updates the EEPROM checksum and flash
+ * @hw: pointer to hardware structure
+ *
+ * After writing the EEPROM image to shadow RAM using the EEWR register,
+ * software calculates the checksum, updates it in the EEPROM, and
+ * instructs the hardware to update the flash.
+ **/
+static s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw)
+{
+ s32 status;
+
+ status = ixgbe_update_eeprom_checksum_generic(hw);
+
+ if (status == 0)
+ status = ixgbe_update_flash_X540(hw);
+
+ return status;
+}
+
+/**
+ * ixgbe_update_flash_X540 - Instruct HW to copy EEPROM to Flash device
+ * @hw: pointer to hardware structure
+ *
+ * Set FLUP (bit 23) of the EEC register to instruct the hardware to copy
+ * the EEPROM image from shadow RAM to the flash device.
+ **/
+static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw)
+{
+ u32 flup;
+ s32 status = IXGBE_ERR_EEPROM;
+
+ status = ixgbe_poll_flash_update_done_X540(hw);
+ if (status == IXGBE_ERR_EEPROM) {
+ hw_dbg(hw, "Flash update time out\n");
+ goto out;
+ }
+
+ flup = IXGBE_READ_REG(hw, IXGBE_EEC) | IXGBE_EEC_FLUP;
+ IXGBE_WRITE_REG(hw, IXGBE_EEC, flup);
+
+ status = ixgbe_poll_flash_update_done_X540(hw);
+ if (status == 0)
+ hw_dbg(hw, "Flash update complete\n");
+ else
+ hw_dbg(hw, "Flash update time out\n");
+
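+ /*
+ * Rev 0 silicon may carry a second valid flash sector (SEC1VAL);
+ * if so, kick off one more update so both sectors get written.
+ */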
+ if (hw->revision_id == 0) {
+ flup = IXGBE_READ_REG(hw, IXGBE_EEC);
+
+ if (flup & IXGBE_EEC_SEC1VAL) {
+ flup |= IXGBE_EEC_FLUP;
+ IXGBE_WRITE_REG(hw, IXGBE_EEC, flup);
+ }
+
+ status = ixgbe_poll_flash_update_done_X540(hw);
+ if (status == 0)
+ hw_dbg(hw, "Flash update complete\n");
+ else
+ hw_dbg(hw, "Flash update time out\n");
+
+ }
+out:
+ return status;
+}
+
+/**
+ * ixgbe_poll_flash_update_done_X540 - Poll flash update status
+ * @hw: pointer to hardware structure
+ *
+ * Polls the FLUDONE (bit 26) of the EEC Register to determine when the
+ * flash update is done.
+ **/
+static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw)
+{
+ u32 i;
+ u32 reg;
+ s32 status = IXGBE_ERR_EEPROM;
+
+ for (i = 0; i < IXGBE_FLUDONE_ATTEMPTS; i++) {
+ reg = IXGBE_READ_REG(hw, IXGBE_EEC);
+ if (reg & IXGBE_EEC_FLUDONE) {
+ status = 0;
+ break;
+ }
+ udelay(5);
+ }
+ return status;
+}
+
+/**
+ * ixgbe_acquire_swfw_sync_X540 - Acquire SWFW semaphore
+ * @hw: pointer to hardware structure
+ * @mask: Mask to specify which semaphore to acquire
+ *
+ * Acquires the SWFW semaphore through the SW_FW_SYNC register for
+ * the specified function (CSR, PHY0, PHY1, NVM, Flash)
+ **/
+static s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask)
+{
+ u32 swfw_sync;
+ u32 swmask = mask;
+ u32 fwmask = mask << 5;
+ u32 hwmask = 0;
+ u32 timeout = 200;
+ u32 i;
+
+ if (swmask == IXGBE_GSSR_EEP_SM)
+ hwmask = IXGBE_GSSR_FLASH_SM;
+
+ for (i = 0; i < timeout; i++) {
+ /*
+ * SW NVM semaphore bit is used for access to all
+ * SW_FW_SYNC bits (not just NVM)
+ */
+ if (ixgbe_get_swfw_sync_semaphore(hw))
+ return IXGBE_ERR_SWFW_SYNC;
+
+ swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
+ if (!(swfw_sync & (fwmask | swmask | hwmask))) {
+ swfw_sync |= swmask;
+ IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync);
+ ixgbe_release_swfw_sync_semaphore(hw);
+ break;
+ } else {
+ /*
+ * Firmware currently using resource (fwmask),
+ * hardware currently using resource (hwmask),
+ * or other software thread currently using
+ * resource (swmask)
+ */
+ ixgbe_release_swfw_sync_semaphore(hw);
+ msleep(5);
+ }
+ }
+
+ /*
+ * If the resource is not released by the FW/HW, the SW can assume that
+ * the FW/HW has malfunctioned. In that case the SW should set the
+ * SW bit(s) of the requested resource(s) while ignoring the
+ * corresponding FW/HW bits in the SW_FW_SYNC register.
+ */
+ if (i >= timeout) {
+ swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
+ if (swfw_sync & (fwmask | hwmask)) {
+ if (ixgbe_get_swfw_sync_semaphore(hw))
+ return IXGBE_ERR_SWFW_SYNC;
+
+ swfw_sync |= swmask;
+ IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync);
+ ixgbe_release_swfw_sync_semaphore(hw);
+ }
+ }
+
+ msleep(5);
+ return 0;
+}
+
+/**
+ * ixgbe_release_swfw_sync_X540 - Release SWFW semaphore
+ * @hw: pointer to hardware structure
+ * @mask: Mask to specify which semaphore to release
+ *
+ * Releases the SWFW semaphore through the SW_FW_SYNC register
+ * for the specified function (CSR, PHY0, PHY1, NVM, Flash)
+ **/
+static void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask)
+{
+ u32 swfw_sync;
+ u32 swmask = mask;
+
+ ixgbe_get_swfw_sync_semaphore(hw);
+
+ swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
+ swfw_sync &= ~swmask;
+ IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync);
+
+ ixgbe_release_swfw_sync_semaphore(hw);
+ msleep(5);
+}
+
+/**
+ * ixgbe_get_swfw_sync_semaphore - Get hardware semaphore
+ * @hw: pointer to hardware structure
+ *
+ * Sets the hardware semaphores so SW/FW can gain control of shared resources
+ **/
+static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw)
+{
+ s32 status = IXGBE_ERR_EEPROM;
+ u32 timeout = 2000;
+ u32 i;
+ u32 swsm;
+
+ /* Get SMBI software semaphore between device drivers first */
+ for (i = 0; i < timeout; i++) {
+ /*
+ * If the SMBI bit is 0 when we read it, then the bit will be
+ * set and we have the semaphore
+ */
+ swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
+ if (!(swsm & IXGBE_SWSM_SMBI)) {
+ status = 0;
+ break;
+ }
+ udelay(50);
+ }
+
+ /* Now get the semaphore between SW/FW through the REGSMP bit */
+ if (!status) {
+ for (i = 0; i < timeout; i++) {
+ swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
+ if (!(swsm & IXGBE_SWFW_REGSMP))
+ break;
+
+ udelay(50);
+ }
+ } else {
+ hw_dbg(hw, "Software semaphore SMBI between device drivers "
+ "not granted.\n");
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_release_swfw_sync_semaphore - Release hardware semaphore
+ * @hw: pointer to hardware structure
+ *
+ * This function clears hardware semaphore bits.
+ **/
+static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw)
+{
+ u32 swsm;
+
+ /* Release both semaphores by writing 0 to the bits REGSMP and SMBI */
+
+ swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
+ swsm &= ~IXGBE_SWSM_SMBI;
+ IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
+
+ swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
+ swsm &= ~IXGBE_SWFW_REGSMP;
+ IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swsm);
+
+ IXGBE_WRITE_FLUSH(hw);
+}
+
+static struct ixgbe_mac_operations mac_ops_X540 = {
+ .init_hw = &ixgbe_init_hw_generic,
+ .reset_hw = &ixgbe_reset_hw_X540,
+ .start_hw = &ixgbe_start_hw_generic,
+ .clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic,
+ .get_media_type = &ixgbe_get_media_type_X540,
+ .get_supported_physical_layer =
+ &ixgbe_get_supported_physical_layer_X540,
+ .enable_rx_dma = &ixgbe_enable_rx_dma_generic,
+ .get_mac_addr = &ixgbe_get_mac_addr_generic,
+ .get_san_mac_addr = &ixgbe_get_san_mac_addr_generic,
+ .get_device_caps = NULL,
+ .get_wwn_prefix = &ixgbe_get_wwn_prefix_generic,
+ .stop_adapter = &ixgbe_stop_adapter_generic,
+ .get_bus_info = &ixgbe_get_bus_info_generic,
+ .set_lan_id = &ixgbe_set_lan_id_multi_port_pcie,
+ .read_analog_reg8 = NULL,
+ .write_analog_reg8 = NULL,
+ .setup_link = &ixgbe_setup_mac_link_X540,
+ .check_link = &ixgbe_check_mac_link_generic,
+ .get_link_capabilities = &ixgbe_get_copper_link_capabilities_generic,
+ .led_on = &ixgbe_led_on_generic,
+ .led_off = &ixgbe_led_off_generic,
+ .blink_led_start = &ixgbe_blink_led_start_generic,
+ .blink_led_stop = &ixgbe_blink_led_stop_generic,
+ .set_rar = &ixgbe_set_rar_generic,
+ .clear_rar = &ixgbe_clear_rar_generic,
+ .set_vmdq = &ixgbe_set_vmdq_generic,
+ .clear_vmdq = &ixgbe_clear_vmdq_generic,
+ .init_rx_addrs = &ixgbe_init_rx_addrs_generic,
+ .update_uc_addr_list = &ixgbe_update_uc_addr_list_generic,
+ .update_mc_addr_list = &ixgbe_update_mc_addr_list_generic,
+ .enable_mc = &ixgbe_enable_mc_generic,
+ .disable_mc = &ixgbe_disable_mc_generic,
+ .clear_vfta = &ixgbe_clear_vfta_generic,
+ .set_vfta = &ixgbe_set_vfta_generic,
+ .fc_enable = &ixgbe_fc_enable_generic,
+ .init_uta_tables = &ixgbe_init_uta_tables_generic,
+ .setup_sfp = NULL,
+};
+
+static struct ixgbe_eeprom_operations eeprom_ops_X540 = {
+ .init_params = &ixgbe_init_eeprom_params_X540,
+ .read = &ixgbe_read_eerd_X540,
+ .write = &ixgbe_write_eewr_X540,
+ .calc_checksum = &ixgbe_calc_eeprom_checksum_X540,
+ .validate_checksum = &ixgbe_validate_eeprom_checksum_generic,
+ .update_checksum = &ixgbe_update_eeprom_checksum_X540,
+};
+
+static struct ixgbe_phy_operations phy_ops_X540 = {
+ .identify = &ixgbe_identify_phy_generic,
+ .identify_sfp = &ixgbe_identify_sfp_module_generic,
+ .init = NULL,
+ .reset = &ixgbe_reset_phy_generic,
+ .read_reg = &ixgbe_read_phy_reg_generic,
+ .write_reg = &ixgbe_write_phy_reg_generic,
+ .setup_link = &ixgbe_setup_phy_link_generic,
+ .setup_link_speed = &ixgbe_setup_phy_link_speed_generic,
+ .read_i2c_byte = &ixgbe_read_i2c_byte_generic,
+ .write_i2c_byte = &ixgbe_write_i2c_byte_generic,
+ .read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic,
+ .write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic,
+ .check_overtemp = &ixgbe_tn_check_overtemp,
+};
+
+struct ixgbe_info ixgbe_X540_info = {
+ .mac = ixgbe_mac_X540,
+ .get_invariants = &ixgbe_get_invariants_X540,
+ .mac_ops = &mac_ops_X540,
+ .eeprom_ops = &eeprom_ops_X540,
+ .phy_ops = &phy_ops_X540,
+ .mbx_ops = &mbx_ops_generic,
+};
diff --git a/drivers/net/ixgbevf/Makefile b/drivers/net/ixgbevf/Makefile
index dd4e0d27e8cc..1f35d229e71a 100644
--- a/drivers/net/ixgbevf/Makefile
+++ b/drivers/net/ixgbevf/Makefile
@@ -1,7 +1,7 @@
################################################################################
#
# Intel 82599 Virtual Function driver
-# Copyright(c) 1999 - 2009 Intel Corporation.
+# Copyright(c) 1999 - 2010 Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbevf/defines.h b/drivers/net/ixgbevf/defines.h
index ca2c81f49a05..f8a807d606c7 100644
--- a/drivers/net/ixgbevf/defines.h
+++ b/drivers/net/ixgbevf/defines.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 82599 Virtual Function driver
- Copyright(c) 1999 - 2009 Intel Corporation.
+ Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbevf/ixgbevf.h b/drivers/net/ixgbevf/ixgbevf.h
index da4033c6efa2..0cd6abcf9306 100644
--- a/drivers/net/ixgbevf/ixgbevf.h
+++ b/drivers/net/ixgbevf/ixgbevf.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 82599 Virtual Function driver
- Copyright(c) 1999 - 2009 Intel Corporation.
+ Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbevf/ixgbevf_main.c b/drivers/net/ixgbevf/ixgbevf_main.c
index dc03c9652389..5b8063cb4e6c 100644
--- a/drivers/net/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ixgbevf/ixgbevf_main.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 82599 Virtual Function driver
- Copyright(c) 1999 - 2009 Intel Corporation.
+ Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -51,9 +51,10 @@ char ixgbevf_driver_name[] = "ixgbevf";
static const char ixgbevf_driver_string[] =
"Intel(R) 82599 Virtual Function";
-#define DRV_VERSION "1.0.0-k0"
+#define DRV_VERSION "1.0.12-k0"
const char ixgbevf_driver_version[] = DRV_VERSION;
-static char ixgbevf_copyright[] = "Copyright (c) 2009 Intel Corporation.";
+static char ixgbevf_copyright[] =
+ "Copyright (c) 2009 - 2010 Intel Corporation.";
static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
[board_82599_vf] = &ixgbevf_vf_info,
@@ -3424,10 +3425,6 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev,
if (hw->mac.ops.get_bus_info)
hw->mac.ops.get_bus_info(hw);
-
- netif_carrier_off(netdev);
- netif_tx_stop_all_queues(netdev);
-
strcpy(netdev->name, "eth%d");
err = register_netdev(netdev);
@@ -3436,6 +3433,8 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev,
adapter->netdev_registered = true;
+ netif_carrier_off(netdev);
+
ixgbevf_init_last_counter_stats(adapter);
/* print the MAC address */
diff --git a/drivers/net/ixgbevf/mbx.c b/drivers/net/ixgbevf/mbx.c
index 84ac486f4a65..7a8833125770 100644
--- a/drivers/net/ixgbevf/mbx.c
+++ b/drivers/net/ixgbevf/mbx.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 82599 Virtual Function driver
- Copyright(c) 1999 - 2009 Intel Corporation.
+ Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbevf/mbx.h b/drivers/net/ixgbevf/mbx.h
index 8c063bebee7f..b2b5bf5daa3d 100644
--- a/drivers/net/ixgbevf/mbx.h
+++ b/drivers/net/ixgbevf/mbx.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 82599 Virtual Function driver
- Copyright(c) 1999 - 2009 Intel Corporation.
+ Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbevf/regs.h b/drivers/net/ixgbevf/regs.h
index 12f75960aec1..fb80ca1bcc93 100644
--- a/drivers/net/ixgbevf/regs.h
+++ b/drivers/net/ixgbevf/regs.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 82599 Virtual Function driver
- Copyright(c) 1999 - 2009 Intel Corporation.
+ Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbevf/vf.c b/drivers/net/ixgbevf/vf.c
index bfe42c1fcfaf..971019d819b4 100644
--- a/drivers/net/ixgbevf/vf.c
+++ b/drivers/net/ixgbevf/vf.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 82599 Virtual Function driver
- Copyright(c) 1999 - 2009 Intel Corporation.
+ Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbevf/vf.h b/drivers/net/ixgbevf/vf.h
index 61f9dc831424..144c99d5363a 100644
--- a/drivers/net/ixgbevf/vf.h
+++ b/drivers/net/ixgbevf/vf.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 82599 Virtual Function driver
- Copyright(c) 1999 - 2009 Intel Corporation.
+ Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/jme.c b/drivers/net/jme.c
index d85edf3119c2..c57d9a43ceca 100644
--- a/drivers/net/jme.c
+++ b/drivers/net/jme.c
@@ -2955,11 +2955,7 @@ jme_init_one(struct pci_dev *pdev,
* Tell stack that we are not ready to work until open()
*/
netif_carrier_off(netdev);
- netif_stop_queue(netdev);
- /*
- * Register netdev
- */
rc = register_netdev(netdev);
if (rc) {
pr_err("Cannot register net device\n");
diff --git a/drivers/net/ks8851.c b/drivers/net/ks8851.c
index 51919fcd50c2..0fa4a9887ba2 100644
--- a/drivers/net/ks8851.c
+++ b/drivers/net/ks8851.c
@@ -1545,6 +1545,37 @@ static int ks8851_read_selftest(struct ks8851_net *ks)
/* driver bus management functions */
+#ifdef CONFIG_PM
+static int ks8851_suspend(struct spi_device *spi, pm_message_t state)
+{
+ struct ks8851_net *ks = dev_get_drvdata(&spi->dev);
+ struct net_device *dev = ks->netdev;
+
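+ /* detach from the stack first so nothing queues packets while we stop */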
+ if (netif_running(dev)) {
+ netif_device_detach(dev);
+ ks8851_net_stop(dev);
+ }
+
+ return 0;
+}
+
+static int ks8851_resume(struct spi_device *spi)
+{
+ struct ks8851_net *ks = dev_get_drvdata(&spi->dev);
+ struct net_device *dev = ks->netdev;
+
+ if (netif_running(dev)) {
+ ks8851_net_open(dev);
+ netif_device_attach(dev);
+ }
+
+ return 0;
+}
+#else
+#define ks8851_suspend NULL
+#define ks8851_resume NULL
+#endif
+
static int __devinit ks8851_probe(struct spi_device *spi)
{
struct net_device *ndev;
@@ -1679,6 +1710,8 @@ static struct spi_driver ks8851_driver = {
},
.probe = ks8851_probe,
.remove = __devexit_p(ks8851_remove),
+ .suspend = ks8851_suspend,
+ .resume = ks8851_resume,
};
static int __init ks8851_init(void)
diff --git a/drivers/net/lance.c b/drivers/net/lance.c
index f06296bfe293..02336edce748 100644
--- a/drivers/net/lance.c
+++ b/drivers/net/lance.c
@@ -207,7 +207,7 @@ tx_full and tbusy flags.
#define LANCE_BUS_IF 0x16
#define LANCE_TOTAL_SIZE 0x18
-#define TX_TIMEOUT 20
+#define TX_TIMEOUT (HZ/5)
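+/* HZ/5 is 200 ms at any HZ; the old constant 20 was 200 ms only at HZ=100 */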
/* The LANCE Rx and Tx ring descriptors. */
struct lance_rx_head {
diff --git a/drivers/net/lib82596.c b/drivers/net/lib82596.c
index c27f4291b350..9e042894479b 100644
--- a/drivers/net/lib82596.c
+++ b/drivers/net/lib82596.c
@@ -161,7 +161,7 @@ enum commands {
#define RX_SUSPEND 0x0030
#define RX_ABORT 0x0040
-#define TX_TIMEOUT 5
+#define TX_TIMEOUT (HZ/20)
struct i596_reg {
diff --git a/drivers/net/lib8390.c b/drivers/net/lib8390.c
index e7030ceb178b..da74db4a03d4 100644
--- a/drivers/net/lib8390.c
+++ b/drivers/net/lib8390.c
@@ -203,7 +203,7 @@ static void __NS8390_init(struct net_device *dev, int startp);
static int __ei_open(struct net_device *dev)
{
unsigned long flags;
- struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+ struct ei_device *ei_local = netdev_priv(dev);
if (dev->watchdog_timeo <= 0)
dev->watchdog_timeo = TX_TIMEOUT;
@@ -231,7 +231,7 @@ static int __ei_open(struct net_device *dev)
*/
static int __ei_close(struct net_device *dev)
{
- struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+ struct ei_device *ei_local = netdev_priv(dev);
unsigned long flags;
/*
@@ -256,7 +256,7 @@ static int __ei_close(struct net_device *dev)
static void __ei_tx_timeout(struct net_device *dev)
{
unsigned long e8390_base = dev->base_addr;
- struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+ struct ei_device *ei_local = netdev_priv(dev);
int txsr, isr, tickssofar = jiffies - dev_trans_start(dev);
unsigned long flags;
@@ -303,7 +303,7 @@ static netdev_tx_t __ei_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
unsigned long e8390_base = dev->base_addr;
- struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+ struct ei_device *ei_local = netdev_priv(dev);
int send_length = skb->len, output_page;
unsigned long flags;
char buf[ETH_ZLEN];
@@ -592,7 +592,7 @@ static void ei_tx_err(struct net_device *dev)
static void ei_tx_intr(struct net_device *dev)
{
unsigned long e8390_base = dev->base_addr;
- struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+ struct ei_device *ei_local = netdev_priv(dev);
int status = ei_inb(e8390_base + EN0_TSR);
ei_outb_p(ENISR_TX, e8390_base + EN0_ISR); /* Ack intr. */
@@ -675,7 +675,7 @@ static void ei_tx_intr(struct net_device *dev)
static void ei_receive(struct net_device *dev)
{
unsigned long e8390_base = dev->base_addr;
- struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+ struct ei_device *ei_local = netdev_priv(dev);
unsigned char rxing_page, this_frame, next_frame;
unsigned short current_offset;
int rx_pkt_count = 0;
@@ -879,7 +879,7 @@ static void ei_rx_overrun(struct net_device *dev)
static struct net_device_stats *__ei_get_stats(struct net_device *dev)
{
unsigned long ioaddr = dev->base_addr;
- struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+ struct ei_device *ei_local = netdev_priv(dev);
unsigned long flags;
/* If the card is stopped, just return the present stats. */
@@ -927,7 +927,7 @@ static void do_set_multicast_list(struct net_device *dev)
{
unsigned long e8390_base = dev->base_addr;
int i;
- struct ei_device *ei_local = (struct ei_device*)netdev_priv(dev);
+ struct ei_device *ei_local = netdev_priv(dev);
if (!(dev->flags&(IFF_PROMISC|IFF_ALLMULTI)))
{
@@ -981,7 +981,7 @@ static void do_set_multicast_list(struct net_device *dev)
static void __ei_set_multicast_list(struct net_device *dev)
{
unsigned long flags;
- struct ei_device *ei_local = (struct ei_device*)netdev_priv(dev);
+ struct ei_device *ei_local = netdev_priv(dev);
spin_lock_irqsave(&ei_local->page_lock, flags);
do_set_multicast_list(dev);
@@ -998,7 +998,7 @@ static void __ei_set_multicast_list(struct net_device *dev)
static void ethdev_setup(struct net_device *dev)
{
- struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+ struct ei_device *ei_local = netdev_priv(dev);
if (ei_debug > 1)
printk(version);
@@ -1036,7 +1036,7 @@ static struct net_device *____alloc_ei_netdev(int size)
static void __NS8390_init(struct net_device *dev, int startp)
{
unsigned long e8390_base = dev->base_addr;
- struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+ struct ei_device *ei_local = netdev_priv(dev);
int i;
int endcfg = ei_local->word16
? (0x48 | ENDCFG_WTS | (ei_local->bigendian ? ENDCFG_BOS : 0))
@@ -1099,7 +1099,7 @@ static void NS8390_trigger_send(struct net_device *dev, unsigned int length,
int start_page)
{
unsigned long e8390_base = dev->base_addr;
- struct ei_device *ei_local __attribute((unused)) = (struct ei_device *) netdev_priv(dev);
+ struct ei_device *ei_local __attribute((unused)) = netdev_priv(dev);
ei_outb_p(E8390_NODMA+E8390_PAGE0, e8390_base+E8390_CMD);
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 0fc9dc7f20db..6ed577b065df 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -38,6 +38,7 @@ struct macvlan_port {
struct hlist_head vlan_hash[MACVLAN_HASH_SIZE];
struct list_head vlans;
struct rcu_head rcu;
+ bool passthru;
};
#define macvlan_port_get_rcu(dev) \
@@ -169,6 +170,7 @@ static struct sk_buff *macvlan_handle_frame(struct sk_buff *skb)
macvlan_broadcast(skb, port, NULL,
MACVLAN_MODE_PRIVATE |
MACVLAN_MODE_VEPA |
+ MACVLAN_MODE_PASSTHRU |
MACVLAN_MODE_BRIDGE);
else if (src->mode == MACVLAN_MODE_VEPA)
/* flood to everyone except source */
@@ -185,7 +187,10 @@ static struct sk_buff *macvlan_handle_frame(struct sk_buff *skb)
return skb;
}
- vlan = macvlan_hash_lookup(port, eth->h_dest);
+ if (port->passthru)
+ vlan = list_first_entry(&port->vlans, struct macvlan_dev, list);
+ else
+ vlan = macvlan_hash_lookup(port, eth->h_dest);
if (vlan == NULL)
return skb;
@@ -243,18 +248,22 @@ xmit_world:
netdev_tx_t macvlan_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
- int i = skb_get_queue_mapping(skb);
- struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
unsigned int len = skb->len;
int ret;
+ const struct macvlan_dev *vlan = netdev_priv(dev);
ret = macvlan_queue_xmit(skb, dev);
if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
- txq->tx_packets++;
- txq->tx_bytes += len;
- } else
- txq->tx_dropped++;
+ struct macvlan_pcpu_stats *pcpu_stats;
+ pcpu_stats = this_cpu_ptr(vlan->pcpu_stats);
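+ /* syncp lets 32-bit stat readers see consistent 64-bit counters */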
+ u64_stats_update_begin(&pcpu_stats->syncp);
+ pcpu_stats->tx_packets++;
+ pcpu_stats->tx_bytes += len;
+ u64_stats_update_end(&pcpu_stats->syncp);
+ } else {
+ this_cpu_inc(vlan->pcpu_stats->tx_dropped);
+ }
return ret;
}
EXPORT_SYMBOL_GPL(macvlan_start_xmit);
@@ -284,6 +293,11 @@ static int macvlan_open(struct net_device *dev)
struct net_device *lowerdev = vlan->lowerdev;
int err;
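+ /*
+ * In passthru mode the single macvlan owns the lowerdev, so enable
+ * promiscuous mode instead of programming a unicast filter entry.
+ */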
+ if (vlan->port->passthru) {
+ dev_set_promiscuity(lowerdev, 1);
+ goto hash_add;
+ }
+
err = -EBUSY;
if (macvlan_addr_busy(vlan->port, dev->dev_addr))
goto out;
@@ -296,6 +310,8 @@ static int macvlan_open(struct net_device *dev)
if (err < 0)
goto del_unicast;
}
+
+hash_add:
macvlan_hash_add(vlan);
return 0;
@@ -310,12 +326,18 @@ static int macvlan_stop(struct net_device *dev)
struct macvlan_dev *vlan = netdev_priv(dev);
struct net_device *lowerdev = vlan->lowerdev;
+ if (vlan->port->passthru) {
+ dev_set_promiscuity(lowerdev, -1);
+ goto hash_del;
+ }
+
dev_mc_unsync(lowerdev, dev);
if (dev->flags & IFF_ALLMULTI)
dev_set_allmulti(lowerdev, -1);
dev_uc_del(lowerdev, dev->dev_addr);
+hash_del:
macvlan_hash_del(vlan);
return 0;
}
@@ -414,14 +436,15 @@ static int macvlan_init(struct net_device *dev)
dev->state = (dev->state & ~MACVLAN_STATE_MASK) |
(lowerdev->state & MACVLAN_STATE_MASK);
dev->features = lowerdev->features & MACVLAN_FEATURES;
+ dev->features |= NETIF_F_LLTX;
dev->gso_max_size = lowerdev->gso_max_size;
dev->iflink = lowerdev->ifindex;
dev->hard_header_len = lowerdev->hard_header_len;
macvlan_set_lockdep_class(dev);
- vlan->rx_stats = alloc_percpu(struct macvlan_rx_stats);
- if (!vlan->rx_stats)
+ vlan->pcpu_stats = alloc_percpu(struct macvlan_pcpu_stats);
+ if (!vlan->pcpu_stats)
return -ENOMEM;
return 0;
@@ -431,7 +454,7 @@ static void macvlan_uninit(struct net_device *dev)
{
struct macvlan_dev *vlan = netdev_priv(dev);
- free_percpu(vlan->rx_stats);
+ free_percpu(vlan->pcpu_stats);
}
static struct rtnl_link_stats64 *macvlan_dev_get_stats64(struct net_device *dev,
@@ -439,33 +462,38 @@ static struct rtnl_link_stats64 *macvlan_dev_get_stats64(struct net_device *dev,
{
struct macvlan_dev *vlan = netdev_priv(dev);
- dev_txq_stats_fold(dev, stats);
-
- if (vlan->rx_stats) {
- struct macvlan_rx_stats *p, accum = {0};
- u64 rx_packets, rx_bytes, rx_multicast;
+ if (vlan->pcpu_stats) {
+ struct macvlan_pcpu_stats *p;
+ u64 rx_packets, rx_bytes, rx_multicast, tx_packets, tx_bytes;
+ u32 rx_errors = 0, tx_dropped = 0;
unsigned int start;
int i;
for_each_possible_cpu(i) {
- p = per_cpu_ptr(vlan->rx_stats, i);
+ p = per_cpu_ptr(vlan->pcpu_stats, i);
do {
start = u64_stats_fetch_begin_bh(&p->syncp);
rx_packets = p->rx_packets;
rx_bytes = p->rx_bytes;
rx_multicast = p->rx_multicast;
+ tx_packets = p->tx_packets;
+ tx_bytes = p->tx_bytes;
} while (u64_stats_fetch_retry_bh(&p->syncp, start));
- accum.rx_packets += rx_packets;
- accum.rx_bytes += rx_bytes;
- accum.rx_multicast += rx_multicast;
- /* rx_errors is an ulong, updated without syncp protection */
- accum.rx_errors += p->rx_errors;
+
+ stats->rx_packets += rx_packets;
+ stats->rx_bytes += rx_bytes;
+ stats->multicast += rx_multicast;
+ stats->tx_packets += tx_packets;
+ stats->tx_bytes += tx_bytes;
+ /* rx_errors & tx_dropped are u32, updated
+ * without syncp protection.
+ */
+ rx_errors += p->rx_errors;
+ tx_dropped += p->tx_dropped;
}
- stats->rx_packets = accum.rx_packets;
- stats->rx_bytes = accum.rx_bytes;
- stats->rx_errors = accum.rx_errors;
- stats->rx_dropped = accum.rx_errors;
- stats->multicast = accum.rx_multicast;
+ stats->rx_errors = rx_errors;
+ stats->rx_dropped = rx_errors;
+ stats->tx_dropped = tx_dropped;
}
return stats;
}
@@ -549,6 +577,7 @@ static int macvlan_port_create(struct net_device *dev)
if (port == NULL)
return -ENOMEM;
+ port->passthru = false;
port->dev = dev;
INIT_LIST_HEAD(&port->vlans);
for (i = 0; i < MACVLAN_HASH_SIZE; i++)
@@ -593,6 +622,7 @@ static int macvlan_validate(struct nlattr *tb[], struct nlattr *data[])
case MACVLAN_MODE_PRIVATE:
case MACVLAN_MODE_VEPA:
case MACVLAN_MODE_BRIDGE:
+ case MACVLAN_MODE_PASSTHRU:
break;
default:
return -EINVAL;
@@ -601,25 +631,6 @@ static int macvlan_validate(struct nlattr *tb[], struct nlattr *data[])
return 0;
}
-static int macvlan_get_tx_queues(struct net *net,
- struct nlattr *tb[],
- unsigned int *num_tx_queues,
- unsigned int *real_num_tx_queues)
-{
- struct net_device *real_dev;
-
- if (!tb[IFLA_LINK])
- return -EINVAL;
-
- real_dev = __dev_get_by_index(net, nla_get_u32(tb[IFLA_LINK]));
- if (!real_dev)
- return -ENODEV;
-
- *num_tx_queues = real_dev->num_tx_queues;
- *real_num_tx_queues = real_dev->real_num_tx_queues;
- return 0;
-}
-
int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[],
int (*receive)(struct sk_buff *skb),
@@ -661,6 +672,10 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
}
port = macvlan_port_get(lowerdev);
+ /* Only 1 macvlan device can be created in passthru mode */
+ if (port->passthru)
+ return -EINVAL;
+
vlan->lowerdev = lowerdev;
vlan->dev = dev;
vlan->port = port;
@@ -671,6 +686,13 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
if (data && data[IFLA_MACVLAN_MODE])
vlan->mode = nla_get_u32(data[IFLA_MACVLAN_MODE]);
+ if (vlan->mode == MACVLAN_MODE_PASSTHRU) {
+ if (!list_empty(&port->vlans))
+ return -EINVAL;
+ port->passthru = true;
+ memcpy(dev->dev_addr, lowerdev->dev_addr, ETH_ALEN);
+ }
+
err = register_netdevice(dev);
if (err < 0)
goto destroy_port;
@@ -743,7 +765,6 @@ int macvlan_link_register(struct rtnl_link_ops *ops)
{
/* common fields */
ops->priv_size = sizeof(struct macvlan_dev);
- ops->get_tx_queues = macvlan_get_tx_queues;
ops->validate = macvlan_validate;
ops->maxtype = IFLA_MACVLAN_MAX;
ops->policy = macvlan_policy;
diff --git a/drivers/net/ne-h8300.c b/drivers/net/ne-h8300.c
index e0b0ef11f110..30be8c634ebd 100644
--- a/drivers/net/ne-h8300.c
+++ b/drivers/net/ne-h8300.c
@@ -86,7 +86,7 @@ static u32 reg_offset[16];
static int __init init_reg_offset(struct net_device *dev,unsigned long base_addr)
{
- struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+ struct ei_device *ei_local = netdev_priv(dev);
int i;
unsigned char bus_width;
@@ -218,7 +218,7 @@ static int __init ne_probe1(struct net_device *dev, int ioaddr)
int start_page, stop_page;
int reg0, ret;
static unsigned version_printed;
- struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+ struct ei_device *ei_local = netdev_priv(dev);
unsigned char bus_width;
if (!request_region(ioaddr, NE_IO_EXTENT, DRV_NAME))
@@ -371,7 +371,7 @@ static int ne_close(struct net_device *dev)
static void ne_reset_8390(struct net_device *dev)
{
unsigned long reset_start_time = jiffies;
- struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+ struct ei_device *ei_local = netdev_priv(dev);
if (ei_debug > 1)
printk(KERN_DEBUG "resetting the 8390 t=%ld...", jiffies);
@@ -397,7 +397,7 @@ static void ne_reset_8390(struct net_device *dev)
static void ne_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
{
- struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+ struct ei_device *ei_local = netdev_priv(dev);
/* This *shouldn't* happen. If it does, it's the last thing you'll see */
if (ei_status.dmaing)
@@ -437,7 +437,7 @@ static void ne_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, i
static void ne_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset)
{
- struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+ struct ei_device *ei_local = netdev_priv(dev);
#ifdef NE_SANITY_CHECK
int xfer_count = count;
#endif
@@ -507,7 +507,7 @@ static void ne_block_input(struct net_device *dev, int count, struct sk_buff *sk
static void ne_block_output(struct net_device *dev, int count,
const unsigned char *buf, const int start_page)
{
- struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+ struct ei_device *ei_local = netdev_priv(dev);
unsigned long dma_start;
#ifdef NE_SANITY_CHECK
int retries = 0;
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index a75ba9517404..e1d30d7f2071 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -41,9 +41,6 @@
MODULE_DESCRIPTION("QLogic/NetXen (1/10) GbE Converged Ethernet Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(NETXEN_NIC_LINUX_VERSIONID);
-MODULE_FIRMWARE(NX_P2_MN_ROMIMAGE_NAME);
-MODULE_FIRMWARE(NX_P3_CT_ROMIMAGE_NAME);
-MODULE_FIRMWARE(NX_P3_MN_ROMIMAGE_NAME);
MODULE_FIRMWARE(NX_UNIFIED_ROMIMAGE_NAME);
char netxen_nic_driver_name[] = "netxen_nic";
diff --git a/drivers/net/pcmcia/axnet_cs.c b/drivers/net/pcmcia/axnet_cs.c
index d2e166e29dda..1a0eb128e607 100644
--- a/drivers/net/pcmcia/axnet_cs.c
+++ b/drivers/net/pcmcia/axnet_cs.c
@@ -111,13 +111,14 @@ static irqreturn_t ax_interrupt(int irq, void *dev_id);
typedef struct axnet_dev_t {
struct pcmcia_device *p_dev;
- caddr_t base;
- struct timer_list watchdog;
- int stale, fast_poll;
- u_short link_status;
- u_char duplex_flag;
- int phy_id;
- int flags;
+ caddr_t base;
+ struct timer_list watchdog;
+ int stale, fast_poll;
+ u_short link_status;
+ u_char duplex_flag;
+ int phy_id;
+ int flags;
+ int active_low;
} axnet_dev_t;
static inline axnet_dev_t *PRIV(struct net_device *dev)
@@ -322,6 +323,8 @@ static int axnet_config(struct pcmcia_device *link)
if (info->flags & IS_AX88790)
outb(0x10, dev->base_addr + AXNET_GPIO); /* select Internal PHY */
+ info->active_low = 0;
+
for (i = 0; i < 32; i++) {
j = mdio_read(dev->base_addr + AXNET_MII_EEP, i, 1);
j2 = mdio_read(dev->base_addr + AXNET_MII_EEP, i, 2);
@@ -329,15 +332,18 @@ static int axnet_config(struct pcmcia_device *link)
if ((j != 0) && (j != 0xffff)) break;
}
- /* Maybe PHY is in power down mode. (PPD_SET = 1)
- Bit 2 of CCSR is active low. */
if (i == 32) {
+ /* Maybe PHY is in power down mode. (PPD_SET = 1)
+ Bit 2 of CCSR is active low. */
pcmcia_write_config_byte(link, CISREG_CCSR, 0x04);
for (i = 0; i < 32; i++) {
j = mdio_read(dev->base_addr + AXNET_MII_EEP, i, 1);
j2 = mdio_read(dev->base_addr + AXNET_MII_EEP, i, 2);
if (j == j2) continue;
- if ((j != 0) && (j != 0xffff)) break;
+ if ((j != 0) && (j != 0xffff)) {
+ info->active_low = 1;
+ break;
+ }
}
}
@@ -383,8 +389,12 @@ static int axnet_suspend(struct pcmcia_device *link)
static int axnet_resume(struct pcmcia_device *link)
{
struct net_device *dev = link->priv;
+ axnet_dev_t *info = PRIV(dev);
if (link->open) {
+ if (info->active_low == 1)
+ pcmcia_write_config_byte(link, CISREG_CCSR, 0x04);
+
axnet_reset_8390(dev);
AX88190_init(dev, 1);
netif_device_attach(dev);
@@ -865,7 +875,7 @@ static void do_set_multicast_list(struct net_device *dev);
static int ax_open(struct net_device *dev)
{
unsigned long flags;
- struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+ struct ei_device *ei_local = netdev_priv(dev);
/*
* Grab the page lock so we own the register set, then call
@@ -916,7 +926,7 @@ static int ax_close(struct net_device *dev)
static void axnet_tx_timeout(struct net_device *dev)
{
long e8390_base = dev->base_addr;
- struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+ struct ei_device *ei_local = netdev_priv(dev);
int txsr, isr, tickssofar = jiffies - dev_trans_start(dev);
unsigned long flags;
@@ -963,7 +973,7 @@ static netdev_tx_t axnet_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
long e8390_base = dev->base_addr;
- struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+ struct ei_device *ei_local = netdev_priv(dev);
int length, send_length, output_page;
unsigned long flags;
u8 packet[ETH_ZLEN];
@@ -1260,7 +1270,7 @@ static void ei_tx_err(struct net_device *dev)
static void ei_tx_intr(struct net_device *dev)
{
long e8390_base = dev->base_addr;
- struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+ struct ei_device *ei_local = netdev_priv(dev);
int status = inb(e8390_base + EN0_TSR);
/*
@@ -1344,7 +1354,7 @@ static void ei_tx_intr(struct net_device *dev)
static void ei_receive(struct net_device *dev)
{
long e8390_base = dev->base_addr;
- struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+ struct ei_device *ei_local = netdev_priv(dev);
unsigned char rxing_page, this_frame, next_frame;
unsigned short current_offset;
int rx_pkt_count = 0;
@@ -1529,7 +1539,7 @@ static void ei_rx_overrun(struct net_device *dev)
static struct net_device_stats *get_stats(struct net_device *dev)
{
long ioaddr = dev->base_addr;
- struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+ struct ei_device *ei_local = netdev_priv(dev);
unsigned long flags;
/* If the card is stopped, just return the present stats. */
@@ -1578,7 +1588,7 @@ static void do_set_multicast_list(struct net_device *dev)
{
long e8390_base = dev->base_addr;
int i;
- struct ei_device *ei_local = (struct ei_device*)netdev_priv(dev);
+ struct ei_device *ei_local = netdev_priv(dev);
if (!(dev->flags&(IFF_PROMISC|IFF_ALLMULTI))) {
memset(ei_local->mcfilter, 0, 8);
@@ -1636,7 +1646,7 @@ static void AX88190_init(struct net_device *dev, int startp)
{
axnet_dev_t *info = PRIV(dev);
long e8390_base = dev->base_addr;
- struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+ struct ei_device *ei_local = netdev_priv(dev);
int i;
int endcfg = ei_local->word16 ? (0x48 | ENDCFG_WTS) : 0x48;
@@ -1702,7 +1712,7 @@ static void NS8390_trigger_send(struct net_device *dev, unsigned int length,
int start_page)
{
long e8390_base = dev->base_addr;
- struct ei_device *ei_local __attribute((unused)) = (struct ei_device *) netdev_priv(dev);
+ struct ei_device *ei_local __attribute((unused)) = netdev_priv(dev);
if (inb_p(e8390_base) & E8390_TRANS)
{
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index 09cf56d0416a..0c91598ae280 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -1136,8 +1136,7 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
a four-byte PPP header on each packet */
*skb_push(skb, 2) = 1;
if (ppp->pass_filter &&
- sk_run_filter(skb, ppp->pass_filter,
- ppp->pass_len) == 0) {
+ sk_run_filter(skb, ppp->pass_filter) == 0) {
if (ppp->debug & 1)
printk(KERN_DEBUG "PPP: outbound frame not passed\n");
kfree_skb(skb);
@@ -1145,8 +1144,7 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
}
/* if this packet passes the active filter, record the time */
if (!(ppp->active_filter &&
- sk_run_filter(skb, ppp->active_filter,
- ppp->active_len) == 0))
+ sk_run_filter(skb, ppp->active_filter) == 0))
ppp->last_xmit = jiffies;
skb_pull(skb, 2);
#else
@@ -1758,8 +1756,7 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
*skb_push(skb, 2) = 0;
if (ppp->pass_filter &&
- sk_run_filter(skb, ppp->pass_filter,
- ppp->pass_len) == 0) {
+ sk_run_filter(skb, ppp->pass_filter) == 0) {
if (ppp->debug & 1)
printk(KERN_DEBUG "PPP: inbound frame "
"not passed\n");
@@ -1767,8 +1764,7 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
return;
}
if (!(ppp->active_filter &&
- sk_run_filter(skb, ppp->active_filter,
- ppp->active_len) == 0))
+ sk_run_filter(skb, ppp->active_filter) == 0))
ppp->last_recv = jiffies;
__skb_pull(skb, 2);
} else
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c
index 7496ed2c34ab..1a3584edd79c 100644
--- a/drivers/net/qla3xxx.c
+++ b/drivers/net/qla3xxx.c
@@ -2467,7 +2467,7 @@ map_error:
static netdev_tx_t ql3xxx_send(struct sk_buff *skb,
struct net_device *ndev)
{
- struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
+ struct ql3_adapter *qdev = netdev_priv(ndev);
struct ql3xxx_port_registers __iomem *port_regs =
qdev->mem_map_registers;
struct ql_tx_buf_cb *tx_cb;
@@ -3390,7 +3390,7 @@ static void ql_set_mac_info(struct ql3_adapter *qdev)
static void ql_display_dev_info(struct net_device *ndev)
{
- struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
+ struct ql3_adapter *qdev = netdev_priv(ndev);
struct pci_dev *pdev = qdev->pdev;
netdev_info(ndev,
@@ -3573,7 +3573,7 @@ static int ql3xxx_open(struct net_device *ndev)
static int ql3xxx_set_mac_address(struct net_device *ndev, void *p)
{
- struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
+ struct ql3_adapter *qdev = netdev_priv(ndev);
struct ql3xxx_port_registers __iomem *port_regs =
qdev->mem_map_registers;
struct sockaddr *addr = p;
@@ -3608,7 +3608,7 @@ static int ql3xxx_set_mac_address(struct net_device *ndev, void *p)
static void ql3xxx_tx_timeout(struct net_device *ndev)
{
- struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
+ struct ql3_adapter *qdev = netdev_priv(ndev);
netdev_err(ndev, "Resetting...\n");
/*
diff --git a/drivers/net/qlcnic/qlcnic.h b/drivers/net/qlcnic/qlcnic.h
index 8ecc170c9b74..56f54ffabb2f 100644
--- a/drivers/net/qlcnic/qlcnic.h
+++ b/drivers/net/qlcnic/qlcnic.h
@@ -51,8 +51,8 @@
#define _QLCNIC_LINUX_MAJOR 5
#define _QLCNIC_LINUX_MINOR 0
-#define _QLCNIC_LINUX_SUBVERSION 11
-#define QLCNIC_LINUX_VERSIONID "5.0.11"
+#define _QLCNIC_LINUX_SUBVERSION 12
+#define QLCNIC_LINUX_VERSIONID "5.0.12"
#define QLCNIC_DRV_IDC_VER 0x01
#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\
(_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
@@ -1126,8 +1126,7 @@ struct qlcnic_eswitch {
/* Return codes for Error handling */
#define QL_STATUS_INVALID_PARAM -1
-#define MAX_BW 100
-#define MIN_BW 1
+#define MAX_BW 100 /* % of link speed */
#define MAX_VLAN_ID 4095
#define MIN_VLAN_ID 2
#define MAX_TX_QUEUES 1
@@ -1135,7 +1134,7 @@ struct qlcnic_eswitch {
#define DEFAULT_MAC_LEARN 1
#define IS_VALID_VLAN(vlan) (vlan >= MIN_VLAN_ID && vlan < MAX_VLAN_ID)
-#define IS_VALID_BW(bw) (bw >= MIN_BW && bw <= MAX_BW)
+#define IS_VALID_BW(bw) (bw <= MAX_BW)
#define IS_VALID_TX_QUEUES(que) (que > 0 && que <= MAX_TX_QUEUES)
#define IS_VALID_RX_QUEUES(que) (que > 0 && que <= MAX_RX_QUEUES)
@@ -1377,6 +1376,8 @@ static const struct qlcnic_brdinfo qlcnic_boards[] = {
"3200 Series Single Port 10Gb Intelligent Ethernet Adapter"},
{0x1077, 0x8020, 0x103c, 0x3733,
"NC523SFP 10Gb 2-port Server Adapter"},
+ {0x1077, 0x8020, 0x103c, 0x3346,
+ "CN1000Q Dual Port Converged Network Adapter"},
{0x1077, 0x8020, 0x0, 0x0, "cLOM8214 1/10GbE Controller"},
};
diff --git a/drivers/net/qlcnic/qlcnic_ctx.c b/drivers/net/qlcnic/qlcnic_ctx.c
index 1cdc05dade6b..3ad1f3eba289 100644
--- a/drivers/net/qlcnic/qlcnic_ctx.c
+++ b/drivers/net/qlcnic/qlcnic_ctx.c
@@ -480,6 +480,9 @@ int qlcnic_fw_create_ctx(struct qlcnic_adapter *adapter)
{
int err;
+ if (reset_devices)
+ pci_reset_function(adapter->pdev);
+
err = qlcnic_fw_cmd_create_rx_ctx(adapter);
if (err)
return err;
diff --git a/drivers/net/qlcnic/qlcnic_ethtool.c b/drivers/net/qlcnic/qlcnic_ethtool.c
index ec21d24015c4..c38929636488 100644
--- a/drivers/net/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/qlcnic/qlcnic_ethtool.c
@@ -925,9 +925,10 @@ static int qlcnic_set_rx_csum(struct net_device *dev, u32 data)
dev->features &= ~NETIF_F_LRO;
qlcnic_send_lro_cleanup(adapter);
+ dev_info(&adapter->pdev->dev,
+ "disabling LRO as rx_csum is off\n");
}
adapter->rx_csum = !!data;
- dev_info(&adapter->pdev->dev, "disabling LRO as rx_csum is off\n");
return 0;
}
diff --git a/drivers/net/qlcnic/qlcnic_main.c b/drivers/net/qlcnic/qlcnic_main.c
index 7a298cdf9ab3..a3dcd04be22f 100644
--- a/drivers/net/qlcnic/qlcnic_main.c
+++ b/drivers/net/qlcnic/qlcnic_main.c
@@ -1450,7 +1450,6 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter,
netdev->irq = adapter->msix_entries[0].vector;
netif_carrier_off(netdev);
- netif_stop_queue(netdev);
err = register_netdev(netdev);
if (err) {
diff --git a/drivers/net/qlge/qlge.h b/drivers/net/qlge/qlge.h
index 22821398fc63..bdb8fe868539 100644
--- a/drivers/net/qlge/qlge.h
+++ b/drivers/net/qlge/qlge.h
@@ -16,7 +16,7 @@
*/
#define DRV_NAME "qlge"
#define DRV_STRING "QLogic 10 Gigabit PCI-E Ethernet Driver "
-#define DRV_VERSION "v1.00.00.25.00.00-01"
+#define DRV_VERSION "v1.00.00.27.00.00-01"
#define WQ_ADDR_ALIGN 0x3 /* 4 byte alignment */
@@ -2221,6 +2221,7 @@ int ql_write_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 data);
int ql_unpause_mpi_risc(struct ql_adapter *qdev);
int ql_pause_mpi_risc(struct ql_adapter *qdev);
int ql_hard_reset_mpi_risc(struct ql_adapter *qdev);
+int ql_soft_reset_mpi_risc(struct ql_adapter *qdev);
int ql_dump_risc_ram_area(struct ql_adapter *qdev, void *buf,
u32 ram_addr, int word_count);
int ql_core_dump(struct ql_adapter *qdev,
@@ -2236,6 +2237,7 @@ int ql_mb_set_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 control);
int ql_mb_get_port_cfg(struct ql_adapter *qdev);
int ql_mb_set_port_cfg(struct ql_adapter *qdev);
int ql_wait_fifo_empty(struct ql_adapter *qdev);
+void ql_get_dump(struct ql_adapter *qdev, void *buff);
void ql_gen_reg_dump(struct ql_adapter *qdev,
struct ql_reg_dump *mpi_coredump);
netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev);
diff --git a/drivers/net/qlge/qlge_dbg.c b/drivers/net/qlge/qlge_dbg.c
index 4747492935ef..fca804f36d61 100644
--- a/drivers/net/qlge/qlge_dbg.c
+++ b/drivers/net/qlge/qlge_dbg.c
@@ -1317,9 +1317,28 @@ void ql_gen_reg_dump(struct ql_adapter *qdev,
status = ql_get_ets_regs(qdev, &mpi_coredump->ets[0]);
if (status)
return;
+}
+
+void ql_get_dump(struct ql_adapter *qdev, void *buff)
+{
+ /*
+ * If force dump is set, the dump has already been taken and
+ * stored in our internal buffer: start the spool that writes
+ * it to the log file, and take a snapshot of the general
+ * registers into the user's buffer. If force is not set,
+ * take a complete dump straight into the user's buffer.
+ */
- if (test_bit(QL_FRC_COREDUMP, &qdev->flags))
+ if (!test_bit(QL_FRC_COREDUMP, &qdev->flags)) {
+ if (!ql_core_dump(qdev, buff))
+ ql_soft_reset_mpi_risc(qdev);
+ else
+ netif_err(qdev, drv, qdev->ndev, "coredump failed!\n");
+ } else {
+ ql_gen_reg_dump(qdev, buff);
ql_get_core_dump(qdev);
+ }
}
/* Coredump to messages log file using separate worker thread */
diff --git a/drivers/net/qlge/qlge_ethtool.c b/drivers/net/qlge/qlge_ethtool.c
index 4892d64f4e05..8149cc9de4ca 100644
--- a/drivers/net/qlge/qlge_ethtool.c
+++ b/drivers/net/qlge/qlge_ethtool.c
@@ -375,7 +375,10 @@ static void ql_get_drvinfo(struct net_device *ndev,
strncpy(drvinfo->bus_info, pci_name(qdev->pdev), 32);
drvinfo->n_stats = 0;
drvinfo->testinfo_len = 0;
- drvinfo->regdump_len = 0;
+ if (!test_bit(QL_FRC_COREDUMP, &qdev->flags))
+ drvinfo->regdump_len = sizeof(struct ql_mpi_coredump);
+ else
+ drvinfo->regdump_len = sizeof(struct ql_reg_dump);
drvinfo->eedump_len = 0;
}
@@ -547,7 +550,12 @@ static void ql_self_test(struct net_device *ndev,
static int ql_get_regs_len(struct net_device *ndev)
{
- return sizeof(struct ql_reg_dump);
+ struct ql_adapter *qdev = netdev_priv(ndev);
+
+ if (!test_bit(QL_FRC_COREDUMP, &qdev->flags))
+ return sizeof(struct ql_mpi_coredump);
+ else
+ return sizeof(struct ql_reg_dump);
}
static void ql_get_regs(struct net_device *ndev,
@@ -555,7 +563,12 @@ static void ql_get_regs(struct net_device *ndev,
{
struct ql_adapter *qdev = netdev_priv(ndev);
- ql_gen_reg_dump(qdev, p);
+ ql_get_dump(qdev, p);
+ qdev->core_is_dumped = 0;
+ if (!test_bit(QL_FRC_COREDUMP, &qdev->flags))
+ regs->len = sizeof(struct ql_mpi_coredump);
+ else
+ regs->len = sizeof(struct ql_reg_dump);
}
static int ql_get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
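Reviewer note: the ethtool core allocates the buffer it hands to .get_regs based on .get_regs_len, so the three sites touched above (drvinfo->regdump_len, ql_get_regs_len(), and regs->len in ql_get_regs()) must agree on which structure is produced for a given QL_FRC_COREDUMP setting. A user-space sketch of that contract, with stand-in struct sizes:

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	/* stand-ins for struct ql_mpi_coredump / struct ql_reg_dump;
	 * the sizes here are made up */
	struct mpi_coredump { char blob[800]; };
	struct reg_dump     { char blob[200]; };

	static int force_coredump;	/* models test_bit(QL_FRC_COREDUMP, ...) */

	static size_t get_regs_len(void)
	{
		return force_coredump ? sizeof(struct reg_dump)
				      : sizeof(struct mpi_coredump);
	}

	static void get_regs(void *buf, size_t *len)
	{
		*len = get_regs_len();		/* must match what gets filled */
		memset(buf, 0, *len);
	}

	int main(void)
	{
		size_t len;
		void *buf = malloc(get_regs_len());	/* what the core does */

		get_regs(buf, &len);
		printf("dump length: %zu\n", len);
		free(buf);
		return 0;
	}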
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index c30e0fe55a31..d9a76260880b 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -3844,7 +3844,7 @@ static int ql_adapter_reset(struct ql_adapter *qdev)
static void ql_display_dev_info(struct net_device *ndev)
{
- struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
+ struct ql_adapter *qdev = netdev_priv(ndev);
netif_info(qdev, probe, qdev->ndev,
"Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
@@ -4264,7 +4264,7 @@ static struct net_device_stats *qlge_get_stats(struct net_device
static void qlge_set_multicast_list(struct net_device *ndev)
{
- struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
+ struct ql_adapter *qdev = netdev_priv(ndev);
struct netdev_hw_addr *ha;
int i, status;
@@ -4354,7 +4354,7 @@ exit:
static int qlge_set_mac_address(struct net_device *ndev, void *p)
{
- struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
+ struct ql_adapter *qdev = netdev_priv(ndev);
struct sockaddr *addr = p;
int status;
@@ -4377,7 +4377,7 @@ static int qlge_set_mac_address(struct net_device *ndev, void *p)
static void qlge_tx_timeout(struct net_device *ndev)
{
- struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
+ struct ql_adapter *qdev = netdev_priv(ndev);
ql_queue_asic_error(qdev);
}
diff --git a/drivers/net/qlge/qlge_mpi.c b/drivers/net/qlge/qlge_mpi.c
index 0e7c7c7ee164..100a462cc916 100644
--- a/drivers/net/qlge/qlge_mpi.c
+++ b/drivers/net/qlge/qlge_mpi.c
@@ -87,7 +87,7 @@ exit:
return status;
}
-static int ql_soft_reset_mpi_risc(struct ql_adapter *qdev)
+int ql_soft_reset_mpi_risc(struct ql_adapter *qdev)
{
int status;
status = ql_write_mpi_reg(qdev, 0x00001010, 1);
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index d88ce9fb1cbd..7d33ef4bcb4a 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -846,10 +846,10 @@ static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
else
tp->features &= ~RTL_FEATURE_WOL;
__rtl8169_set_wol(tp, wol->wolopts);
- device_set_wakeup_enable(&tp->pci_dev->dev, wol->wolopts);
-
spin_unlock_irq(&tp->lock);
+ device_set_wakeup_enable(&tp->pci_dev->dev, wol->wolopts);
+
return 0;
}
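Reviewer note: device_set_wakeup_enable() may sleep, so calling it under spin_lock_irq() risked a scheduling-while-atomic splat; the hunk moves it past the unlock (my reading — the diff carries no changelog). The general shape, as a kernel-style sketch with hypothetical names (update_wol_state, enable):

	spin_lock_irq(&tp->lock);
	update_wol_state(tp);		/* non-sleeping work only */
	spin_unlock_irq(&tp->lock);

	/* anything that may sleep -- wakeup helpers, mutexes,
	 * GFP_KERNEL allocations -- must run after the unlock */
	device_set_wakeup_enable(&tp->pci_dev->dev, enable);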
@@ -2931,7 +2931,7 @@ static const struct rtl_cfg_info {
.hw_start = rtl_hw_start_8168,
.region = 2,
.align = 8,
- .intr_event = SYSErr | RxFIFOOver | LinkChg | RxOverflow |
+ .intr_event = SYSErr | LinkChg | RxOverflow |
TxErr | TxOK | RxOK | RxErr,
.napi_event = TxErr | TxOK | RxOK | RxOverflow,
.features = RTL_FEATURE_GMII | RTL_FEATURE_MSI,
@@ -4440,8 +4440,7 @@ static inline void rtl8169_rx_csum(struct sk_buff *skb, u32 opts1)
u32 status = opts1 & RxProtoMask;
if (((status == RxProtoTCP) && !(opts1 & TCPFail)) ||
- ((status == RxProtoUDP) && !(opts1 & UDPFail)) ||
- ((status == RxProtoIP) && !(opts1 & IPFail)))
+ ((status == RxProtoUDP) && !(opts1 & UDPFail)))
skb->ip_summed = CHECKSUM_UNNECESSARY;
else
skb_checksum_none_assert(skb);
@@ -4588,7 +4587,8 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
}
/* Work around for rx fifo overflow */
- if (unlikely(status & RxFIFOOver)) {
+ if (unlikely(status & RxFIFOOver) &&
+ (tp->mac_version == RTL_GIGA_MAC_VER_11)) {
netif_stop_queue(dev);
rtl8169_tx_timeout(dev);
break;
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index ecc25aab896a..0f4219cb0be2 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -8321,8 +8321,7 @@ mem_alloc_failed:
static void __devexit s2io_rem_nic(struct pci_dev *pdev)
{
- struct net_device *dev =
- (struct net_device *)pci_get_drvdata(pdev);
+ struct net_device *dev = pci_get_drvdata(pdev);
struct s2io_nic *sp;
if (dev == NULL) {
diff --git a/drivers/net/sh_eth.c b/drivers/net/sh_eth.c
index 50259dfec583..b12660d72338 100644
--- a/drivers/net/sh_eth.c
+++ b/drivers/net/sh_eth.c
@@ -45,9 +45,9 @@ static void sh_eth_set_duplex(struct net_device *ndev)
u32 ioaddr = ndev->base_addr;
if (mdp->duplex) /* Full */
- ctrl_outl(ctrl_inl(ioaddr + ECMR) | ECMR_DM, ioaddr + ECMR);
+ writel(readl(ioaddr + ECMR) | ECMR_DM, ioaddr + ECMR);
else /* Half */
- ctrl_outl(ctrl_inl(ioaddr + ECMR) & ~ECMR_DM, ioaddr + ECMR);
+ writel(readl(ioaddr + ECMR) & ~ECMR_DM, ioaddr + ECMR);
}
static void sh_eth_set_rate(struct net_device *ndev)
@@ -57,10 +57,10 @@ static void sh_eth_set_rate(struct net_device *ndev)
switch (mdp->speed) {
case 10: /* 10BASE */
- ctrl_outl(ctrl_inl(ioaddr + ECMR) & ~ECMR_RTM, ioaddr + ECMR);
+ writel(readl(ioaddr + ECMR) & ~ECMR_RTM, ioaddr + ECMR);
break;
case 100:/* 100BASE */
- ctrl_outl(ctrl_inl(ioaddr + ECMR) | ECMR_RTM, ioaddr + ECMR);
+ writel(readl(ioaddr + ECMR) | ECMR_RTM, ioaddr + ECMR);
break;
default:
break;
@@ -96,9 +96,9 @@ static void sh_eth_set_duplex(struct net_device *ndev)
u32 ioaddr = ndev->base_addr;
if (mdp->duplex) /* Full */
- ctrl_outl(ctrl_inl(ioaddr + ECMR) | ECMR_DM, ioaddr + ECMR);
+ writel(readl(ioaddr + ECMR) | ECMR_DM, ioaddr + ECMR);
else /* Half */
- ctrl_outl(ctrl_inl(ioaddr + ECMR) & ~ECMR_DM, ioaddr + ECMR);
+ writel(readl(ioaddr + ECMR) & ~ECMR_DM, ioaddr + ECMR);
}
static void sh_eth_set_rate(struct net_device *ndev)
@@ -108,10 +108,10 @@ static void sh_eth_set_rate(struct net_device *ndev)
switch (mdp->speed) {
case 10: /* 10BASE */
- ctrl_outl(0, ioaddr + RTRATE);
+ writel(0, ioaddr + RTRATE);
break;
case 100:/* 100BASE */
- ctrl_outl(1, ioaddr + RTRATE);
+ writel(1, ioaddr + RTRATE);
break;
default:
break;
@@ -143,7 +143,7 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
static void sh_eth_chip_reset(struct net_device *ndev)
{
/* reset device */
- ctrl_outl(ARSTR_ARSTR, ARSTR);
+ writel(ARSTR_ARSTR, ARSTR);
mdelay(1);
}
@@ -152,10 +152,10 @@ static void sh_eth_reset(struct net_device *ndev)
u32 ioaddr = ndev->base_addr;
int cnt = 100;
- ctrl_outl(EDSR_ENALL, ioaddr + EDSR);
- ctrl_outl(ctrl_inl(ioaddr + EDMR) | EDMR_SRST, ioaddr + EDMR);
+ writel(EDSR_ENALL, ioaddr + EDSR);
+ writel(readl(ioaddr + EDMR) | EDMR_SRST, ioaddr + EDMR);
while (cnt > 0) {
- if (!(ctrl_inl(ioaddr + EDMR) & 0x3))
+ if (!(readl(ioaddr + EDMR) & 0x3))
break;
mdelay(1);
cnt--;
@@ -164,14 +164,14 @@ static void sh_eth_reset(struct net_device *ndev)
printk(KERN_ERR "Device reset fail\n");
/* Table Init */
- ctrl_outl(0x0, ioaddr + TDLAR);
- ctrl_outl(0x0, ioaddr + TDFAR);
- ctrl_outl(0x0, ioaddr + TDFXR);
- ctrl_outl(0x0, ioaddr + TDFFR);
- ctrl_outl(0x0, ioaddr + RDLAR);
- ctrl_outl(0x0, ioaddr + RDFAR);
- ctrl_outl(0x0, ioaddr + RDFXR);
- ctrl_outl(0x0, ioaddr + RDFFR);
+ writel(0x0, ioaddr + TDLAR);
+ writel(0x0, ioaddr + TDFAR);
+ writel(0x0, ioaddr + TDFXR);
+ writel(0x0, ioaddr + TDFFR);
+ writel(0x0, ioaddr + RDLAR);
+ writel(0x0, ioaddr + RDFAR);
+ writel(0x0, ioaddr + RDFXR);
+ writel(0x0, ioaddr + RDFFR);
}
static void sh_eth_set_duplex(struct net_device *ndev)
@@ -180,9 +180,9 @@ static void sh_eth_set_duplex(struct net_device *ndev)
u32 ioaddr = ndev->base_addr;
if (mdp->duplex) /* Full */
- ctrl_outl(ctrl_inl(ioaddr + ECMR) | ECMR_DM, ioaddr + ECMR);
+ writel(readl(ioaddr + ECMR) | ECMR_DM, ioaddr + ECMR);
else /* Half */
- ctrl_outl(ctrl_inl(ioaddr + ECMR) & ~ECMR_DM, ioaddr + ECMR);
+ writel(readl(ioaddr + ECMR) & ~ECMR_DM, ioaddr + ECMR);
}
static void sh_eth_set_rate(struct net_device *ndev)
@@ -192,13 +192,13 @@ static void sh_eth_set_rate(struct net_device *ndev)
switch (mdp->speed) {
case 10: /* 10BASE */
- ctrl_outl(GECMR_10, ioaddr + GECMR);
+ writel(GECMR_10, ioaddr + GECMR);
break;
case 100:/* 100BASE */
- ctrl_outl(GECMR_100, ioaddr + GECMR);
+ writel(GECMR_100, ioaddr + GECMR);
break;
case 1000: /* 1000BASE */
- ctrl_outl(GECMR_1000, ioaddr + GECMR);
+ writel(GECMR_1000, ioaddr + GECMR);
break;
default:
break;
@@ -283,9 +283,9 @@ static void sh_eth_reset(struct net_device *ndev)
{
u32 ioaddr = ndev->base_addr;
- ctrl_outl(ctrl_inl(ioaddr + EDMR) | EDMR_SRST, ioaddr + EDMR);
+ writel(readl(ioaddr + EDMR) | EDMR_SRST, ioaddr + EDMR);
mdelay(3);
- ctrl_outl(ctrl_inl(ioaddr + EDMR) & ~EDMR_SRST, ioaddr + EDMR);
+ writel(readl(ioaddr + EDMR) & ~EDMR_SRST, ioaddr + EDMR);
}
#endif
@@ -336,10 +336,10 @@ static void update_mac_address(struct net_device *ndev)
{
u32 ioaddr = ndev->base_addr;
- ctrl_outl((ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
+ writel((ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
(ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]),
ioaddr + MAHR);
- ctrl_outl((ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]),
+ writel((ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]),
ioaddr + MALR);
}
@@ -358,12 +358,12 @@ static void read_mac_address(struct net_device *ndev, unsigned char *mac)
if (mac[0] || mac[1] || mac[2] || mac[3] || mac[4] || mac[5]) {
memcpy(ndev->dev_addr, mac, 6);
} else {
- ndev->dev_addr[0] = (ctrl_inl(ioaddr + MAHR) >> 24);
- ndev->dev_addr[1] = (ctrl_inl(ioaddr + MAHR) >> 16) & 0xFF;
- ndev->dev_addr[2] = (ctrl_inl(ioaddr + MAHR) >> 8) & 0xFF;
- ndev->dev_addr[3] = (ctrl_inl(ioaddr + MAHR) & 0xFF);
- ndev->dev_addr[4] = (ctrl_inl(ioaddr + MALR) >> 8) & 0xFF;
- ndev->dev_addr[5] = (ctrl_inl(ioaddr + MALR) & 0xFF);
+ ndev->dev_addr[0] = (readl(ioaddr + MAHR) >> 24);
+ ndev->dev_addr[1] = (readl(ioaddr + MAHR) >> 16) & 0xFF;
+ ndev->dev_addr[2] = (readl(ioaddr + MAHR) >> 8) & 0xFF;
+ ndev->dev_addr[3] = (readl(ioaddr + MAHR) & 0xFF);
+ ndev->dev_addr[4] = (readl(ioaddr + MALR) >> 8) & 0xFF;
+ ndev->dev_addr[5] = (readl(ioaddr + MALR) & 0xFF);
}
}
@@ -379,19 +379,19 @@ struct bb_info {
/* PHY bit set */
static void bb_set(u32 addr, u32 msk)
{
- ctrl_outl(ctrl_inl(addr) | msk, addr);
+ writel(readl(addr) | msk, addr);
}
/* PHY bit clear */
static void bb_clr(u32 addr, u32 msk)
{
- ctrl_outl((ctrl_inl(addr) & ~msk), addr);
+ writel((readl(addr) & ~msk), addr);
}
/* PHY bit read */
static int bb_read(u32 addr, u32 msk)
{
- return (ctrl_inl(addr) & msk) != 0;
+ return (readl(addr) & msk) != 0;
}
/* Data I/O pin control */
@@ -506,9 +506,9 @@ static void sh_eth_ring_format(struct net_device *ndev)
rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
/* Rx descriptor address set */
if (i == 0) {
- ctrl_outl(mdp->rx_desc_dma, ioaddr + RDLAR);
+ writel(mdp->rx_desc_dma, ioaddr + RDLAR);
#if defined(CONFIG_CPU_SUBTYPE_SH7763)
- ctrl_outl(mdp->rx_desc_dma, ioaddr + RDFAR);
+ writel(mdp->rx_desc_dma, ioaddr + RDFAR);
#endif
}
}
@@ -528,9 +528,9 @@ static void sh_eth_ring_format(struct net_device *ndev)
txdesc->buffer_length = 0;
if (i == 0) {
/* Tx descriptor address set */
- ctrl_outl(mdp->tx_desc_dma, ioaddr + TDLAR);
+ writel(mdp->tx_desc_dma, ioaddr + TDLAR);
#if defined(CONFIG_CPU_SUBTYPE_SH7763)
- ctrl_outl(mdp->tx_desc_dma, ioaddr + TDFAR);
+ writel(mdp->tx_desc_dma, ioaddr + TDFAR);
#endif
}
}
@@ -623,71 +623,71 @@ static int sh_eth_dev_init(struct net_device *ndev)
/* Descriptor format */
sh_eth_ring_format(ndev);
if (mdp->cd->rpadir)
- ctrl_outl(mdp->cd->rpadir_value, ioaddr + RPADIR);
+ writel(mdp->cd->rpadir_value, ioaddr + RPADIR);
/* all sh_eth int mask */
- ctrl_outl(0, ioaddr + EESIPR);
+ writel(0, ioaddr + EESIPR);
#if defined(__LITTLE_ENDIAN__)
if (mdp->cd->hw_swap)
- ctrl_outl(EDMR_EL, ioaddr + EDMR);
+ writel(EDMR_EL, ioaddr + EDMR);
else
#endif
- ctrl_outl(0, ioaddr + EDMR);
+ writel(0, ioaddr + EDMR);
/* FIFO size set */
- ctrl_outl(mdp->cd->fdr_value, ioaddr + FDR);
- ctrl_outl(0, ioaddr + TFTR);
+ writel(mdp->cd->fdr_value, ioaddr + FDR);
+ writel(0, ioaddr + TFTR);
/* Frame recv control */
- ctrl_outl(mdp->cd->rmcr_value, ioaddr + RMCR);
+ writel(mdp->cd->rmcr_value, ioaddr + RMCR);
rx_int_var = mdp->rx_int_var = DESC_I_RINT8 | DESC_I_RINT5;
tx_int_var = mdp->tx_int_var = DESC_I_TINT2;
- ctrl_outl(rx_int_var | tx_int_var, ioaddr + TRSCER);
+ writel(rx_int_var | tx_int_var, ioaddr + TRSCER);
if (mdp->cd->bculr)
- ctrl_outl(0x800, ioaddr + BCULR); /* Burst sycle set */
+ writel(0x800, ioaddr + BCULR); /* Burst cycle set */
- ctrl_outl(mdp->cd->fcftr_value, ioaddr + FCFTR);
+ writel(mdp->cd->fcftr_value, ioaddr + FCFTR);
if (!mdp->cd->no_trimd)
- ctrl_outl(0, ioaddr + TRIMD);
+ writel(0, ioaddr + TRIMD);
/* Recv frame limit set register */
- ctrl_outl(RFLR_VALUE, ioaddr + RFLR);
+ writel(RFLR_VALUE, ioaddr + RFLR);
- ctrl_outl(ctrl_inl(ioaddr + EESR), ioaddr + EESR);
- ctrl_outl(mdp->cd->eesipr_value, ioaddr + EESIPR);
+ writel(readl(ioaddr + EESR), ioaddr + EESR);
+ writel(mdp->cd->eesipr_value, ioaddr + EESIPR);
/* PAUSE Prohibition */
- val = (ctrl_inl(ioaddr + ECMR) & ECMR_DM) |
+ val = (readl(ioaddr + ECMR) & ECMR_DM) |
ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) | ECMR_TE | ECMR_RE;
- ctrl_outl(val, ioaddr + ECMR);
+ writel(val, ioaddr + ECMR);
if (mdp->cd->set_rate)
mdp->cd->set_rate(ndev);
/* E-MAC Status Register clear */
- ctrl_outl(mdp->cd->ecsr_value, ioaddr + ECSR);
+ writel(mdp->cd->ecsr_value, ioaddr + ECSR);
/* E-MAC Interrupt Enable register */
- ctrl_outl(mdp->cd->ecsipr_value, ioaddr + ECSIPR);
+ writel(mdp->cd->ecsipr_value, ioaddr + ECSIPR);
/* Set MAC address */
update_mac_address(ndev);
/* mask reset */
if (mdp->cd->apr)
- ctrl_outl(APR_AP, ioaddr + APR);
+ writel(APR_AP, ioaddr + APR);
if (mdp->cd->mpr)
- ctrl_outl(MPR_MP, ioaddr + MPR);
+ writel(MPR_MP, ioaddr + MPR);
if (mdp->cd->tpauser)
- ctrl_outl(TPAUSER_UNLIMITED, ioaddr + TPAUSER);
+ writel(TPAUSER_UNLIMITED, ioaddr + TPAUSER);
/* Setting the Rx mode will start the Rx process. */
- ctrl_outl(EDRRR_R, ioaddr + EDRRR);
+ writel(EDRRR_R, ioaddr + EDRRR);
netif_start_queue(ndev);
@@ -811,8 +811,8 @@ static int sh_eth_rx(struct net_device *ndev)
/* Restart Rx engine if stopped. */
/* If we don't need to check status, don't. -KDU */
- if (!(ctrl_inl(ndev->base_addr + EDRRR) & EDRRR_R))
- ctrl_outl(EDRRR_R, ndev->base_addr + EDRRR);
+ if (!(readl(ndev->base_addr + EDRRR) & EDRRR_R))
+ writel(EDRRR_R, ndev->base_addr + EDRRR);
return 0;
}
@@ -827,8 +827,8 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
u32 mask;
if (intr_status & EESR_ECI) {
- felic_stat = ctrl_inl(ioaddr + ECSR);
- ctrl_outl(felic_stat, ioaddr + ECSR); /* clear int */
+ felic_stat = readl(ioaddr + ECSR);
+ writel(felic_stat, ioaddr + ECSR); /* clear int */
if (felic_stat & ECSR_ICD)
mdp->stats.tx_carrier_errors++;
if (felic_stat & ECSR_LCHNG) {
@@ -839,25 +839,25 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
else
link_stat = PHY_ST_LINK;
} else {
- link_stat = (ctrl_inl(ioaddr + PSR));
+ link_stat = (readl(ioaddr + PSR));
if (mdp->ether_link_active_low)
link_stat = ~link_stat;
}
if (!(link_stat & PHY_ST_LINK)) {
/* Link Down : disable tx and rx */
- ctrl_outl(ctrl_inl(ioaddr + ECMR) &
+ writel(readl(ioaddr + ECMR) &
~(ECMR_RE | ECMR_TE), ioaddr + ECMR);
} else {
/* Link Up */
- ctrl_outl(ctrl_inl(ioaddr + EESIPR) &
+ writel(readl(ioaddr + EESIPR) &
~DMAC_M_ECI, ioaddr + EESIPR);
/* clear int */
- ctrl_outl(ctrl_inl(ioaddr + ECSR),
+ writel(readl(ioaddr + ECSR),
ioaddr + ECSR);
- ctrl_outl(ctrl_inl(ioaddr + EESIPR) |
+ writel(readl(ioaddr + EESIPR) |
DMAC_M_ECI, ioaddr + EESIPR);
/* enable tx and rx */
- ctrl_outl(ctrl_inl(ioaddr + ECMR) |
+ writel(readl(ioaddr + ECMR) |
(ECMR_RE | ECMR_TE), ioaddr + ECMR);
}
}
@@ -888,8 +888,8 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
/* Receive Descriptor Empty int */
mdp->stats.rx_over_errors++;
- if (ctrl_inl(ioaddr + EDRRR) ^ EDRRR_R)
- ctrl_outl(EDRRR_R, ioaddr + EDRRR);
+ if (readl(ioaddr + EDRRR) ^ EDRRR_R)
+ writel(EDRRR_R, ioaddr + EDRRR);
dev_err(&ndev->dev, "Receive Descriptor Empty\n");
}
if (intr_status & EESR_RFE) {
@@ -903,7 +903,7 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
mask &= ~EESR_ADE;
if (intr_status & mask) {
/* Tx error */
- u32 edtrr = ctrl_inl(ndev->base_addr + EDTRR);
+ u32 edtrr = readl(ndev->base_addr + EDTRR);
/* dmesg */
dev_err(&ndev->dev, "TX error. status=%8.8x cur_tx=%8.8x ",
intr_status, mdp->cur_tx);
@@ -915,7 +915,7 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
/* SH7712 BUG */
if (edtrr ^ EDTRR_TRNS) {
/* tx dma start */
- ctrl_outl(EDTRR_TRNS, ndev->base_addr + EDTRR);
+ writel(EDTRR_TRNS, ndev->base_addr + EDTRR);
}
/* wakeup */
netif_wake_queue(ndev);
@@ -934,12 +934,12 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
spin_lock(&mdp->lock);
/* Get interrupt status */
- intr_status = ctrl_inl(ioaddr + EESR);
+ intr_status = readl(ioaddr + EESR);
/* Clear interrupt */
if (intr_status & (EESR_FRC | EESR_RMAF | EESR_RRF |
EESR_RTLF | EESR_RTSF | EESR_PRE | EESR_CERF |
cd->tx_check | cd->eesr_err_check)) {
- ctrl_outl(intr_status, ioaddr + EESR);
+ writel(intr_status, ioaddr + EESR);
ret = IRQ_HANDLED;
} else
goto other_irq;
@@ -1000,7 +1000,7 @@ static void sh_eth_adjust_link(struct net_device *ndev)
mdp->cd->set_rate(ndev);
}
if (mdp->link == PHY_DOWN) {
- ctrl_outl((ctrl_inl(ioaddr + ECMR) & ~ECMR_TXF)
+ writel((readl(ioaddr + ECMR) & ~ECMR_TXF)
| ECMR_DM, ioaddr + ECMR);
new_state = 1;
mdp->link = phydev->link;
@@ -1125,7 +1125,7 @@ static void sh_eth_tx_timeout(struct net_device *ndev)
/* warning message out. */
printk(KERN_WARNING "%s: transmit timed out, status %8.8x,"
- " resetting...\n", ndev->name, (int)ctrl_inl(ioaddr + EESR));
+ " resetting...\n", ndev->name, (int)readl(ioaddr + EESR));
/* tx_errors count up */
mdp->stats.tx_errors++;
@@ -1196,8 +1196,8 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
mdp->cur_tx++;
- if (!(ctrl_inl(ndev->base_addr + EDTRR) & EDTRR_TRNS))
- ctrl_outl(EDTRR_TRNS, ndev->base_addr + EDTRR);
+ if (!(readl(ndev->base_addr + EDTRR) & EDTRR_TRNS))
+ writel(EDTRR_TRNS, ndev->base_addr + EDTRR);
return NETDEV_TX_OK;
}
@@ -1212,11 +1212,11 @@ static int sh_eth_close(struct net_device *ndev)
netif_stop_queue(ndev);
/* Disable interrupts by clearing the interrupt mask. */
- ctrl_outl(0x0000, ioaddr + EESIPR);
+ writel(0x0000, ioaddr + EESIPR);
/* Stop the chip's Tx and Rx processes. */
- ctrl_outl(0, ioaddr + EDTRR);
- ctrl_outl(0, ioaddr + EDRRR);
+ writel(0, ioaddr + EDTRR);
+ writel(0, ioaddr + EDRRR);
/* PHY Disconnect */
if (mdp->phydev) {
@@ -1251,20 +1251,20 @@ static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
pm_runtime_get_sync(&mdp->pdev->dev);
- mdp->stats.tx_dropped += ctrl_inl(ioaddr + TROCR);
- ctrl_outl(0, ioaddr + TROCR); /* (write clear) */
- mdp->stats.collisions += ctrl_inl(ioaddr + CDCR);
- ctrl_outl(0, ioaddr + CDCR); /* (write clear) */
- mdp->stats.tx_carrier_errors += ctrl_inl(ioaddr + LCCR);
- ctrl_outl(0, ioaddr + LCCR); /* (write clear) */
+ mdp->stats.tx_dropped += readl(ioaddr + TROCR);
+ writel(0, ioaddr + TROCR); /* (write clear) */
+ mdp->stats.collisions += readl(ioaddr + CDCR);
+ writel(0, ioaddr + CDCR); /* (write clear) */
+ mdp->stats.tx_carrier_errors += readl(ioaddr + LCCR);
+ writel(0, ioaddr + LCCR); /* (write clear) */
#if defined(CONFIG_CPU_SUBTYPE_SH7763)
- mdp->stats.tx_carrier_errors += ctrl_inl(ioaddr + CERCR);/* CERCR */
- ctrl_outl(0, ioaddr + CERCR); /* (write clear) */
- mdp->stats.tx_carrier_errors += ctrl_inl(ioaddr + CEECR);/* CEECR */
- ctrl_outl(0, ioaddr + CEECR); /* (write clear) */
+ mdp->stats.tx_carrier_errors += readl(ioaddr + CERCR);/* CERCR */
+ writel(0, ioaddr + CERCR); /* (write clear) */
+ mdp->stats.tx_carrier_errors += readl(ioaddr + CEECR);/* CEECR */
+ writel(0, ioaddr + CEECR); /* (write clear) */
#else
- mdp->stats.tx_carrier_errors += ctrl_inl(ioaddr + CNDCR);
- ctrl_outl(0, ioaddr + CNDCR); /* (write clear) */
+ mdp->stats.tx_carrier_errors += readl(ioaddr + CNDCR);
+ writel(0, ioaddr + CNDCR); /* (write clear) */
#endif
pm_runtime_put_sync(&mdp->pdev->dev);
@@ -1295,11 +1295,11 @@ static void sh_eth_set_multicast_list(struct net_device *ndev)
if (ndev->flags & IFF_PROMISC) {
/* Set promiscuous. */
- ctrl_outl((ctrl_inl(ioaddr + ECMR) & ~ECMR_MCT) | ECMR_PRM,
+ writel((readl(ioaddr + ECMR) & ~ECMR_MCT) | ECMR_PRM,
ioaddr + ECMR);
} else {
/* Normal, unicast/broadcast-only mode. */
- ctrl_outl((ctrl_inl(ioaddr + ECMR) & ~ECMR_PRM) | ECMR_MCT,
+ writel((readl(ioaddr + ECMR) & ~ECMR_PRM) | ECMR_MCT,
ioaddr + ECMR);
}
}
@@ -1307,30 +1307,30 @@ static void sh_eth_set_multicast_list(struct net_device *ndev)
/* SuperH's TSU register init function */
static void sh_eth_tsu_init(u32 ioaddr)
{
- ctrl_outl(0, ioaddr + TSU_FWEN0); /* Disable forward(0->1) */
- ctrl_outl(0, ioaddr + TSU_FWEN1); /* Disable forward(1->0) */
- ctrl_outl(0, ioaddr + TSU_FCM); /* forward fifo 3k-3k */
- ctrl_outl(0xc, ioaddr + TSU_BSYSL0);
- ctrl_outl(0xc, ioaddr + TSU_BSYSL1);
- ctrl_outl(0, ioaddr + TSU_PRISL0);
- ctrl_outl(0, ioaddr + TSU_PRISL1);
- ctrl_outl(0, ioaddr + TSU_FWSL0);
- ctrl_outl(0, ioaddr + TSU_FWSL1);
- ctrl_outl(TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, ioaddr + TSU_FWSLC);
+ writel(0, ioaddr + TSU_FWEN0); /* Disable forward(0->1) */
+ writel(0, ioaddr + TSU_FWEN1); /* Disable forward(1->0) */
+ writel(0, ioaddr + TSU_FCM); /* forward fifo 3k-3k */
+ writel(0xc, ioaddr + TSU_BSYSL0);
+ writel(0xc, ioaddr + TSU_BSYSL1);
+ writel(0, ioaddr + TSU_PRISL0);
+ writel(0, ioaddr + TSU_PRISL1);
+ writel(0, ioaddr + TSU_FWSL0);
+ writel(0, ioaddr + TSU_FWSL1);
+ writel(TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, ioaddr + TSU_FWSLC);
#if defined(CONFIG_CPU_SUBTYPE_SH7763)
- ctrl_outl(0, ioaddr + TSU_QTAG0); /* Disable QTAG(0->1) */
- ctrl_outl(0, ioaddr + TSU_QTAG1); /* Disable QTAG(1->0) */
+ writel(0, ioaddr + TSU_QTAG0); /* Disable QTAG(0->1) */
+ writel(0, ioaddr + TSU_QTAG1); /* Disable QTAG(1->0) */
#else
- ctrl_outl(0, ioaddr + TSU_QTAGM0); /* Disable QTAG(0->1) */
- ctrl_outl(0, ioaddr + TSU_QTAGM1); /* Disable QTAG(1->0) */
+ writel(0, ioaddr + TSU_QTAGM0); /* Disable QTAG(0->1) */
+ writel(0, ioaddr + TSU_QTAGM1); /* Disable QTAG(1->0) */
#endif
- ctrl_outl(0, ioaddr + TSU_FWSR); /* all interrupt status clear */
- ctrl_outl(0, ioaddr + TSU_FWINMK); /* Disable all interrupt */
- ctrl_outl(0, ioaddr + TSU_TEN); /* Disable all CAM entry */
- ctrl_outl(0, ioaddr + TSU_POST1); /* Disable CAM entry [ 0- 7] */
- ctrl_outl(0, ioaddr + TSU_POST2); /* Disable CAM entry [ 8-15] */
- ctrl_outl(0, ioaddr + TSU_POST3); /* Disable CAM entry [16-23] */
- ctrl_outl(0, ioaddr + TSU_POST4); /* Disable CAM entry [24-31] */
+ writel(0, ioaddr + TSU_FWSR); /* all interrupt status clear */
+ writel(0, ioaddr + TSU_FWINMK); /* Disable all interrupt */
+ writel(0, ioaddr + TSU_TEN); /* Disable all CAM entry */
+ writel(0, ioaddr + TSU_POST1); /* Disable CAM entry [ 0- 7] */
+ writel(0, ioaddr + TSU_POST2); /* Disable CAM entry [ 8-15] */
+ writel(0, ioaddr + TSU_POST3); /* Disable CAM entry [16-23] */
+ writel(0, ioaddr + TSU_POST4); /* Disable CAM entry [24-31] */
}
#endif /* SH_ETH_HAS_TSU */
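Reviewer note: nearly all of the sh_eth churn above is the mechanical rewrite ctrl_outl(ctrl_inl(x) | bits, x) → writel(readl(x) | bits, x), i.e. a read-modify-write on a memory-mapped register. A small helper would keep the call sites short — a suggestion only, not part of the patch (note too that the driver passes a plain u32 ioaddr where readl()/writel() formally take an __iomem pointer):

	/* hypothetical helper, not in this patch */
	static inline void sh_eth_modify(void __iomem *reg, u32 clear, u32 set)
	{
		writel((readl(reg) & ~clear) | set, reg);
	}

	/* the recurring duplex update would then become: */
	sh_eth_modify(ioaddr + ECMR, ECMR_DM, mdp->duplex ? ECMR_DM : 0);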
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index bfec2e0f5275..220e0398f1d5 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -3858,7 +3858,6 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port,
/* device is off until link detection */
netif_carrier_off(dev);
- netif_stop_queue(dev);
return dev;
}
diff --git a/drivers/net/smsc911x.h b/drivers/net/smsc911x.h
index 52f38e12a879..50f712e99e96 100644
--- a/drivers/net/smsc911x.h
+++ b/drivers/net/smsc911x.h
@@ -22,7 +22,7 @@
#define __SMSC911X_H__
#define TX_FIFO_LOW_THRESHOLD ((u32)1600)
-#define SMSC911X_EEPROM_SIZE ((u32)7)
+#define SMSC911X_EEPROM_SIZE ((u32)128)
#define USE_DEBUG 0
/* This is the maximum number of packets to be received every
diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
index 28e1ffb13db9..c78a50586c1d 100644
--- a/drivers/net/tulip/de2104x.c
+++ b/drivers/net/tulip/de2104x.c
@@ -2021,7 +2021,6 @@ static int __devinit de_init_one (struct pci_dev *pdev,
de->media_timer.data = (unsigned long) de;
netif_carrier_off(dev);
- netif_stop_queue(dev);
/* wake up device, assign resources */
rc = pci_enable_device(pdev);
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
index a4c3f5708246..acbdab3d66ca 100644
--- a/drivers/net/ucc_geth.c
+++ b/drivers/net/ucc_geth.c
@@ -2050,12 +2050,16 @@ static void ucc_geth_stop(struct ucc_geth_private *ugeth)
ugeth_vdbg("%s: IN", __func__);
+ /*
+ * Tell the kernel the link is down.
+ * Must be done before disabling the controller
+ * or deadlock may happen.
+ */
+ phy_stop(phydev);
+
/* Disable the controller */
ugeth_disable(ugeth, COMM_DIR_RX_AND_TX);
- /* Tell the kernel the link is down */
- phy_stop(phydev);
-
/* Mask all interrupts */
out_be32(ugeth->uccf->p_uccm, 0x00000000);
@@ -2065,9 +2069,6 @@ static void ucc_geth_stop(struct ucc_geth_private *ugeth)
/* Disable Rx and Tx */
clrbits32(&ug_regs->maccfg1, MACCFG1_ENABLE_RX | MACCFG1_ENABLE_TX);
- phy_disconnect(ugeth->phydev);
- ugeth->phydev = NULL;
-
ucc_geth_memclean(ugeth);
}
@@ -3550,7 +3551,10 @@ static int ucc_geth_close(struct net_device *dev)
napi_disable(&ugeth->napi);
+ cancel_work_sync(&ugeth->timeout_work);
ucc_geth_stop(ugeth);
+ phy_disconnect(ugeth->phydev);
+ ugeth->phydev = NULL;
free_irq(ugeth->ug_info->uf_info.irq, ugeth->ndev);
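Reviewer note: the ucc_geth hunks here and below fit together: the timeout worker now recovers with stop + init_mac + phy_start instead of a full close/open, so it must not lose the PHY — hence phy_disconnect() moves out of ucc_geth_stop() into ucc_geth_close(), and close cancels the worker before tearing anything down so the two paths cannot race. The resulting close ordering, annotated as a kernel-style sketch:

	napi_disable(&ugeth->napi);
	cancel_work_sync(&ugeth->timeout_work);	/* recovery worker gone first */
	ucc_geth_stop(ugeth);			/* phy_stop(), then disable
						 * the controller */
	phy_disconnect(ugeth->phydev);		/* real close only; the
						 * timeout path keeps the PHY */
	ugeth->phydev = NULL;
	free_irq(ugeth->ug_info->uf_info.irq, ugeth->ndev);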
@@ -3579,8 +3583,12 @@ static void ucc_geth_timeout_work(struct work_struct *work)
* Must reset MAC *and* PHY. This is done by reopening
* the device.
*/
- ucc_geth_close(dev);
- ucc_geth_open(dev);
+ netif_tx_stop_all_queues(dev);
+ ucc_geth_stop(ugeth);
+ ucc_geth_init_mac(ugeth);
+ /* Must start PHY here */
+ phy_start(ugeth->phydev);
+ netif_tx_start_all_queues(dev);
}
netif_tx_schedule_all(dev);
@@ -3594,7 +3602,6 @@ static void ucc_geth_timeout(struct net_device *dev)
{
struct ucc_geth_private *ugeth = netdev_priv(dev);
- netif_carrier_off(dev);
schedule_work(&ugeth->timeout_work);
}
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index b154a94de03e..be8cc2a8e213 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -1745,7 +1745,6 @@ static int hso_serial_ioctl(struct tty_struct *tty, struct file *file,
unsigned int cmd, unsigned long arg)
{
struct hso_serial *serial = get_serial_by_tty(tty);
- void __user *uarg = (void __user *)arg;
int ret = 0;
D4("IOCTL cmd: %d, arg: %ld", cmd, arg);
diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
index b2bcf99e6f08..7d42f9a2c068 100644
--- a/drivers/net/usb/ipheth.c
+++ b/drivers/net/usb/ipheth.c
@@ -363,7 +363,7 @@ static int ipheth_tx(struct sk_buff *skb, struct net_device *net)
/* Paranoid */
if (skb->len > IPHETH_BUF_SIZE) {
- WARN(1, "%s: skb too large: %d bytes", __func__, skb->len);
+ WARN(1, "%s: skb too large: %d bytes\n", __func__, skb->len);
dev->net->stats.tx_dropped++;
dev_kfree_skb_irq(skb);
return NETDEV_TX_OK;
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index 6710f09346d6..ef3667690b12 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -359,7 +359,7 @@ fail:
static int mdio_read(struct net_device *dev, int phy_id, int loc)
{
- pegasus_t *pegasus = (pegasus_t *) netdev_priv(dev);
+ pegasus_t *pegasus = netdev_priv(dev);
u16 res;
read_mii_word(pegasus, phy_id, loc, &res);
@@ -397,7 +397,7 @@ fail:
static void mdio_write(struct net_device *dev, int phy_id, int loc, int val)
{
- pegasus_t *pegasus = (pegasus_t *) netdev_priv(dev);
+ pegasus_t *pegasus = netdev_priv(dev);
write_mii_word(pegasus, phy_id, loc, val);
}
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index ca7fc9df1ccf..c04d49e31f81 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -45,6 +45,7 @@
#include <linux/usb/usbnet.h>
#include <linux/slab.h>
#include <linux/kernel.h>
+#include <linux/pm_runtime.h>
#define DRIVER_VERSION "22-Aug-2005"
@@ -1273,6 +1274,16 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
struct usb_device *xdev;
int status;
const char *name;
+ struct usb_driver *driver = to_usb_driver(udev->dev.driver);
+
+ /* usbnet already uses usb runtime pm, so runtime pm must also be
+ * enabled on the usb interface; otherwise usb_autopm_get_interface
+ * may fail when USB_SUSPEND (runtime PM) is enabled.
+ */
+ if (!driver->supports_autosuspend) {
+ driver->supports_autosuspend = 1;
+ pm_runtime_enable(&udev->dev);
+ }
name = udev->dev.driver->name;
info = (struct driver_info *) prod->driver_info;
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index bb6b67f6b0cc..b6d402806ae6 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -986,9 +986,15 @@ static int virtnet_probe(struct virtio_device *vdev)
goto unregister;
}
- vi->status = VIRTIO_NET_S_LINK_UP;
- virtnet_update_status(vi);
- netif_carrier_on(dev);
+ /* Assume link up if device can't report link status,
+ otherwise get link status from config. */
+ if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
+ netif_carrier_off(dev);
+ virtnet_update_status(vi);
+ } else {
+ vi->status = VIRTIO_NET_S_LINK_UP;
+ netif_carrier_on(dev);
+ }
pr_debug("virtnet: registered device %s\n", dev->name);
return 0;
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 21314e06e6d7..65860a998321 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -44,6 +44,9 @@ MODULE_DEVICE_TABLE(pci, vmxnet3_pciid_table);
static atomic_t devices_found;
+#define VMXNET3_MAX_DEVICES 10
+static int enable_mq = 1;
+static int irq_share_mode;
/*
* Enable/Disable the given intr
@@ -99,7 +102,7 @@ vmxnet3_ack_events(struct vmxnet3_adapter *adapter, u32 events)
static bool
vmxnet3_tq_stopped(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
- return netif_queue_stopped(adapter->netdev);
+ return tq->stopped;
}
@@ -107,7 +110,7 @@ static void
vmxnet3_tq_start(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
tq->stopped = false;
- netif_start_queue(adapter->netdev);
+ netif_start_subqueue(adapter->netdev, tq - adapter->tx_queue);
}
@@ -115,7 +118,7 @@ static void
vmxnet3_tq_wake(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
tq->stopped = false;
- netif_wake_queue(adapter->netdev);
+ netif_wake_subqueue(adapter->netdev, (tq - adapter->tx_queue));
}
@@ -124,7 +127,7 @@ vmxnet3_tq_stop(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
tq->stopped = true;
tq->num_stop++;
- netif_stop_queue(adapter->netdev);
+ netif_stop_subqueue(adapter->netdev, (tq - adapter->tx_queue));
}
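Reviewer note: tq - adapter->tx_queue is plain C pointer subtraction — two pointers into the same array divide out the element size and yield the queue index, which is exactly what the subqueue helpers want. In isolation:

	#include <assert.h>
	#include <stddef.h>

	struct tx_queue { int stopped; char pad[120]; };	/* stand-in */

	int main(void)
	{
		struct tx_queue queues[8];
		struct tx_queue *tq = &queues[5];

		/* pointer subtraction divides by sizeof(struct tx_queue):
		 * the result is the array index, not a byte offset */
		ptrdiff_t idx = tq - queues;

		assert(idx == 5);
		return 0;
	}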
@@ -135,6 +138,7 @@ static void
vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue)
{
u32 ret;
+ int i;
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK);
ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
@@ -145,22 +149,28 @@ vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue)
if (!netif_carrier_ok(adapter->netdev))
netif_carrier_on(adapter->netdev);
- if (affectTxQueue)
- vmxnet3_tq_start(&adapter->tx_queue, adapter);
+ if (affectTxQueue) {
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ vmxnet3_tq_start(&adapter->tx_queue[i],
+ adapter);
+ }
} else {
printk(KERN_INFO "%s: NIC Link is Down\n",
adapter->netdev->name);
if (netif_carrier_ok(adapter->netdev))
netif_carrier_off(adapter->netdev);
- if (affectTxQueue)
- vmxnet3_tq_stop(&adapter->tx_queue, adapter);
+ if (affectTxQueue) {
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ vmxnet3_tq_stop(&adapter->tx_queue[i], adapter);
+ }
}
}
static void
vmxnet3_process_events(struct vmxnet3_adapter *adapter)
{
+ int i;
u32 events = le32_to_cpu(adapter->shared->ecr);
if (!events)
return;
@@ -176,16 +186,18 @@ vmxnet3_process_events(struct vmxnet3_adapter *adapter)
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_GET_QUEUE_STATUS);
- if (adapter->tqd_start->status.stopped) {
- printk(KERN_ERR "%s: tq error 0x%x\n",
- adapter->netdev->name,
- le32_to_cpu(adapter->tqd_start->status.error));
- }
- if (adapter->rqd_start->status.stopped) {
- printk(KERN_ERR "%s: rq error 0x%x\n",
- adapter->netdev->name,
- adapter->rqd_start->status.error);
- }
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ if (adapter->tqd_start[i].status.stopped)
+ dev_err(&adapter->netdev->dev,
+ "%s: tq[%d] error 0x%x\n",
+ adapter->netdev->name, i, le32_to_cpu(
+ adapter->tqd_start[i].status.error));
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ if (adapter->rqd_start[i].status.stopped)
+ dev_err(&adapter->netdev->dev,
+ "%s: rq[%d] error 0x%x\n",
+ adapter->netdev->name, i,
+ adapter->rqd_start[i].status.error);
schedule_work(&adapter->work);
}
@@ -410,7 +422,7 @@ vmxnet3_tq_cleanup(struct vmxnet3_tx_queue *tq,
}
-void
+static void
vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq,
struct vmxnet3_adapter *adapter)
{
@@ -437,6 +449,17 @@ vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq,
}
+/* Destroy all tx queues */
+void
+vmxnet3_tq_destroy_all(struct vmxnet3_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ vmxnet3_tq_destroy(&adapter->tx_queue[i], adapter);
+}
+
+
static void
vmxnet3_tq_init(struct vmxnet3_tx_queue *tq,
struct vmxnet3_adapter *adapter)
@@ -518,6 +541,14 @@ err:
return -ENOMEM;
}
+static void
+vmxnet3_tq_cleanup_all(struct vmxnet3_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ vmxnet3_tq_cleanup(&adapter->tx_queue[i], adapter);
+}
/*
* starting from ring->next2fill, allocate rx buffers for the given ring
@@ -732,6 +763,17 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
}
+/* Init all tx queues */
+static void
+vmxnet3_tq_init_all(struct vmxnet3_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ vmxnet3_tq_init(&adapter->tx_queue[i], adapter);
+}
+
+
/*
* parse and copy relevant protocol headers:
* For a tso pkt, relevant headers are L2/3/4 including options
@@ -903,6 +945,21 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
}
}
+ spin_lock_irqsave(&tq->tx_lock, flags);
+
+ if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) {
+ tq->stats.tx_ring_full++;
+ dev_dbg(&adapter->netdev->dev,
+ "tx queue stopped on %s, next2comp %u"
+ " next2fill %u\n", adapter->netdev->name,
+ tq->tx_ring.next2comp, tq->tx_ring.next2fill);
+
+ vmxnet3_tq_stop(tq, adapter);
+ spin_unlock_irqrestore(&tq->tx_lock, flags);
+ return NETDEV_TX_BUSY;
+ }
+
+
ret = vmxnet3_parse_and_copy_hdr(skb, tq, &ctx, adapter);
if (ret >= 0) {
BUG_ON(ret <= 0 && ctx.copy_size != 0);
@@ -926,20 +983,6 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
goto drop_pkt;
}
- spin_lock_irqsave(&tq->tx_lock, flags);
-
- if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) {
- tq->stats.tx_ring_full++;
- dev_dbg(&adapter->netdev->dev,
- "tx queue stopped on %s, next2comp %u"
- " next2fill %u\n", adapter->netdev->name,
- tq->tx_ring.next2comp, tq->tx_ring.next2fill);
-
- vmxnet3_tq_stop(tq, adapter);
- spin_unlock_irqrestore(&tq->tx_lock, flags);
- return NETDEV_TX_BUSY;
- }
-
/* fill tx descs related to addr & len */
vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter);
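Reviewer note: the ring-full test moves ahead of vmxnet3_parse_and_copy_hdr() because NETDEV_TX_BUSY makes the core requeue the same skb and call the handler again later — so nothing that mutates queue state (here, the header copy into the data ring) should happen until the queue is known to have room. That is my reading of the reorder; the hunk has no changelog. Skeleton of the safe shape, with hypothetical helpers (ring_space_available, stop_queue, copy_headers_into_data_ring):

	spin_lock_irqsave(&tq->tx_lock, flags);

	if (count > ring_space_available(tq)) {		/* check first */
		stop_queue(tq);
		spin_unlock_irqrestore(&tq->tx_lock, flags);
		return NETDEV_TX_BUSY;	/* skb untouched; core resubmits */
	}

	copy_headers_into_data_ring(skb, tq);		/* mutate second */
	/* ... fill descriptors, update the producer index ... */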
@@ -1000,7 +1043,8 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
if (le32_to_cpu(tq->shared->txNumDeferred) >=
le32_to_cpu(tq->shared->txThreshold)) {
tq->shared->txNumDeferred = 0;
- VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_TXPROD,
+ VMXNET3_WRITE_BAR0_REG(adapter,
+ VMXNET3_REG_TXPROD + tq->qid * 8,
tq->tx_ring.next2fill);
}
@@ -1020,7 +1064,10 @@ vmxnet3_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
- return vmxnet3_tq_xmit(skb, &adapter->tx_queue, adapter, netdev);
+ BUG_ON(skb->queue_mapping > adapter->num_tx_queues);
+ return vmxnet3_tq_xmit(skb,
+ &adapter->tx_queue[skb->queue_mapping],
+ adapter, netdev);
}
@@ -1106,9 +1153,9 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
break;
}
num_rxd++;
-
+ BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2);
idx = rcd->rxdIdx;
- ring_idx = rcd->rqID == rq->qid ? 0 : 1;
+ ring_idx = rcd->rqID < adapter->num_rx_queues ? 0 : 1;
vmxnet3_getRxDesc(rxd, &rq->rx_ring[ring_idx].base[idx].rxd,
&rxCmdDesc);
rbi = rq->buf_info[ring_idx] + idx;
@@ -1260,6 +1307,16 @@ vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq,
}
+static void
+vmxnet3_rq_cleanup_all(struct vmxnet3_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ vmxnet3_rq_cleanup(&adapter->rx_queue[i], adapter);
+}
+
+
void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
struct vmxnet3_adapter *adapter)
{
@@ -1351,6 +1408,25 @@ vmxnet3_rq_init(struct vmxnet3_rx_queue *rq,
static int
+vmxnet3_rq_init_all(struct vmxnet3_adapter *adapter)
+{
+ int i, err = 0;
+
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ err = vmxnet3_rq_init(&adapter->rx_queue[i], adapter);
+ if (unlikely(err)) {
+ dev_err(&adapter->netdev->dev, "%s: failed to "
+ "initialize rx queue%i\n",
+ adapter->netdev->name, i);
+ break;
+ }
+ }
+ return err;
+
+}
+
+
+static int
vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter)
{
int i;
@@ -1398,33 +1474,177 @@ err:
static int
+vmxnet3_rq_create_all(struct vmxnet3_adapter *adapter)
+{
+ int i, err = 0;
+
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ err = vmxnet3_rq_create(&adapter->rx_queue[i], adapter);
+ if (unlikely(err)) {
+ dev_err(&adapter->netdev->dev,
+ "%s: failed to create rx queue%i\n",
+ adapter->netdev->name, i);
+ goto err_out;
+ }
+ }
+ return err;
+err_out:
+ vmxnet3_rq_destroy_all(adapter);
+ return err;
+
+}
+
+/* Multiple queue aware polling function for tx and rx */
+
+static int
vmxnet3_do_poll(struct vmxnet3_adapter *adapter, int budget)
{
+ int rcd_done = 0, i;
if (unlikely(adapter->shared->ecr))
vmxnet3_process_events(adapter);
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ vmxnet3_tq_tx_complete(&adapter->tx_queue[i], adapter);
- vmxnet3_tq_tx_complete(&adapter->tx_queue, adapter);
- return vmxnet3_rq_rx_complete(&adapter->rx_queue, adapter, budget);
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ rcd_done += vmxnet3_rq_rx_complete(&adapter->rx_queue[i],
+ adapter, budget);
+ return rcd_done;
}
static int
vmxnet3_poll(struct napi_struct *napi, int budget)
{
- struct vmxnet3_adapter *adapter = container_of(napi,
- struct vmxnet3_adapter, napi);
+ struct vmxnet3_rx_queue *rx_queue = container_of(napi,
+ struct vmxnet3_rx_queue, napi);
+ int rxd_done;
+
+ rxd_done = vmxnet3_do_poll(rx_queue->adapter, budget);
+
+ if (rxd_done < budget) {
+ napi_complete(napi);
+ vmxnet3_enable_all_intrs(rx_queue->adapter);
+ }
+ return rxd_done;
+}
+
+/*
+ * NAPI polling function for MSI-X mode with multiple Rx queues
+ * Returns the # of NAPI credits consumed (# of rx descriptors processed)
+ */
+
+static int
+vmxnet3_poll_rx_only(struct napi_struct *napi, int budget)
+{
+ struct vmxnet3_rx_queue *rq = container_of(napi,
+ struct vmxnet3_rx_queue, napi);
+ struct vmxnet3_adapter *adapter = rq->adapter;
int rxd_done;
- rxd_done = vmxnet3_do_poll(adapter, budget);
+ /* When sharing interrupt with corresponding tx queue, process
+ * tx completions in that queue as well
+ */
+ if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE) {
+ struct vmxnet3_tx_queue *tq =
+ &adapter->tx_queue[rq - adapter->rx_queue];
+ vmxnet3_tq_tx_complete(tq, adapter);
+ }
+
+ rxd_done = vmxnet3_rq_rx_complete(rq, adapter, budget);
if (rxd_done < budget) {
napi_complete(napi);
- vmxnet3_enable_intr(adapter, 0);
+ vmxnet3_enable_intr(adapter, rq->comp_ring.intr_idx);
}
return rxd_done;
}
+#ifdef CONFIG_PCI_MSI
+
+/*
+ * Handle completion interrupts on tx queues
+ * Returns whether or not the intr is handled
+ */
+
+static irqreturn_t
+vmxnet3_msix_tx(int irq, void *data)
+{
+ struct vmxnet3_tx_queue *tq = data;
+ struct vmxnet3_adapter *adapter = tq->adapter;
+
+ if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
+ vmxnet3_disable_intr(adapter, tq->comp_ring.intr_idx);
+
+ /* Handle the case where only one irq is allocated for all tx queues */
+ if (adapter->share_intr == VMXNET3_INTR_TXSHARE) {
+ int i;
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ struct vmxnet3_tx_queue *txq = &adapter->tx_queue[i];
+ vmxnet3_tq_tx_complete(txq, adapter);
+ }
+ } else {
+ vmxnet3_tq_tx_complete(tq, adapter);
+ }
+ vmxnet3_enable_intr(adapter, tq->comp_ring.intr_idx);
+
+ return IRQ_HANDLED;
+}
+
+
+/*
+ * Handle completion interrupts on rx queues. Returns whether or not the
+ * intr is handled
+ */
+
+static irqreturn_t
+vmxnet3_msix_rx(int irq, void *data)
+{
+ struct vmxnet3_rx_queue *rq = data;
+ struct vmxnet3_adapter *adapter = rq->adapter;
+
+ /* disable intr if needed */
+ if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
+ vmxnet3_disable_intr(adapter, rq->comp_ring.intr_idx);
+ napi_schedule(&rq->napi);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * vmxnet3 msix event intr handler
+ * Returns whether or not the intr is handled
+ */
+
+static irqreturn_t
+vmxnet3_msix_event(int irq, void *data)
+{
+ struct net_device *dev = data;
+ struct vmxnet3_adapter *adapter = netdev_priv(dev);
+
+ /* disable intr if needed */
+ if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
+ vmxnet3_disable_intr(adapter, adapter->intr.event_intr_idx);
+
+ if (adapter->shared->ecr)
+ vmxnet3_process_events(adapter);
+
+ vmxnet3_enable_intr(adapter, adapter->intr.event_intr_idx);
+
+ return IRQ_HANDLED;
+}
+
+#endif /* CONFIG_PCI_MSI */
+
+
/* Interrupt handler for vmxnet3 */
static irqreturn_t
vmxnet3_intr(int irq, void *dev_id)
@@ -1432,7 +1652,7 @@ vmxnet3_intr(int irq, void *dev_id)
struct net_device *dev = dev_id;
struct vmxnet3_adapter *adapter = netdev_priv(dev);
- if (unlikely(adapter->intr.type == VMXNET3_IT_INTX)) {
+ if (adapter->intr.type == VMXNET3_IT_INTX) {
u32 icr = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_ICR);
if (unlikely(icr == 0))
/* not ours */
@@ -1442,77 +1662,144 @@ vmxnet3_intr(int irq, void *dev_id)
/* disable intr if needed */
if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
- vmxnet3_disable_intr(adapter, 0);
+ vmxnet3_disable_all_intrs(adapter);
- napi_schedule(&adapter->napi);
+ napi_schedule(&adapter->rx_queue[0].napi);
return IRQ_HANDLED;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
-
/* netpoll callback. */
static void
vmxnet3_netpoll(struct net_device *netdev)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
- int irq;
-#ifdef CONFIG_PCI_MSI
- if (adapter->intr.type == VMXNET3_IT_MSIX)
- irq = adapter->intr.msix_entries[0].vector;
- else
-#endif
- irq = adapter->pdev->irq;
+ if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
+ vmxnet3_disable_all_intrs(adapter);
+
+ vmxnet3_do_poll(adapter, adapter->rx_queue[0].rx_ring[0].size);
+ vmxnet3_enable_all_intrs(adapter);
- disable_irq(irq);
- vmxnet3_intr(irq, netdev);
- enable_irq(irq);
}
-#endif
+#endif /* CONFIG_NET_POLL_CONTROLLER */
static int
vmxnet3_request_irqs(struct vmxnet3_adapter *adapter)
{
- int err;
+ struct vmxnet3_intr *intr = &adapter->intr;
+ int err = 0, i;
+ int vector = 0;
#ifdef CONFIG_PCI_MSI
if (adapter->intr.type == VMXNET3_IT_MSIX) {
- /* we only use 1 MSI-X vector */
- err = request_irq(adapter->intr.msix_entries[0].vector,
- vmxnet3_intr, 0, adapter->netdev->name,
- adapter->netdev);
- } else if (adapter->intr.type == VMXNET3_IT_MSI) {
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) {
+ sprintf(adapter->tx_queue[i].name, "%s-tx-%d",
+ adapter->netdev->name, vector);
+ err = request_irq(
+ intr->msix_entries[vector].vector,
+ vmxnet3_msix_tx, 0,
+ adapter->tx_queue[i].name,
+ &adapter->tx_queue[i]);
+ } else {
+ sprintf(adapter->tx_queue[i].name, "%s-rxtx-%d",
+ adapter->netdev->name, vector);
+ }
+ if (err) {
+ dev_err(&adapter->netdev->dev,
+ "Failed to request irq for MSIX, %s, "
+ "error %d\n",
+ adapter->tx_queue[i].name, err);
+ return err;
+ }
+
+ /* Handle the case where only one MSI-X vector was
+ * allocated for all tx queues */
+ if (adapter->share_intr == VMXNET3_INTR_TXSHARE) {
+ for (; i < adapter->num_tx_queues; i++)
+ adapter->tx_queue[i].comp_ring.intr_idx
+ = vector;
+ vector++;
+ break;
+ } else {
+ adapter->tx_queue[i].comp_ring.intr_idx
+ = vector++;
+ }
+ }
+ if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE)
+ vector = 0;
+
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE)
+ sprintf(adapter->rx_queue[i].name, "%s-rx-%d",
+ adapter->netdev->name, vector);
+ else
+ sprintf(adapter->rx_queue[i].name, "%s-rxtx-%d",
+ adapter->netdev->name, vector);
+ err = request_irq(intr->msix_entries[vector].vector,
+ vmxnet3_msix_rx, 0,
+ adapter->rx_queue[i].name,
+ &(adapter->rx_queue[i]));
+ if (err) {
+ printk(KERN_ERR "Failed to request irq for MSIX"
+ ", %s, error %d\n",
+ adapter->rx_queue[i].name, err);
+ return err;
+ }
+
+ adapter->rx_queue[i].comp_ring.intr_idx = vector++;
+ }
+
+ sprintf(intr->event_msi_vector_name, "%s-event-%d",
+ adapter->netdev->name, vector);
+ err = request_irq(intr->msix_entries[vector].vector,
+ vmxnet3_msix_event, 0,
+ intr->event_msi_vector_name, adapter->netdev);
+ intr->event_intr_idx = vector;
+
+ } else if (intr->type == VMXNET3_IT_MSI) {
+ adapter->num_rx_queues = 1;
err = request_irq(adapter->pdev->irq, vmxnet3_intr, 0,
adapter->netdev->name, adapter->netdev);
- } else
+ } else {
#endif
- {
+ adapter->num_rx_queues = 1;
err = request_irq(adapter->pdev->irq, vmxnet3_intr,
IRQF_SHARED, adapter->netdev->name,
adapter->netdev);
+#ifdef CONFIG_PCI_MSI
}
-
- if (err)
+#endif
+ intr->num_intrs = vector + 1;
+ if (err) {
printk(KERN_ERR "Failed to request irq %s (intr type:%d), error"
- ":%d\n", adapter->netdev->name, adapter->intr.type, err);
+ ":%d\n", adapter->netdev->name, intr->type, err);
+ } else {
+ /* Number of rx queues will not change after this */
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
+ rq->qid = i;
+ rq->qid2 = i + adapter->num_rx_queues;
+ }
- if (!err) {
- int i;
- /* init our intr settings */
- for (i = 0; i < adapter->intr.num_intrs; i++)
- adapter->intr.mod_levels[i] = UPT1_IML_ADAPTIVE;
- /* next setup intr index for all intr sources */
- adapter->tx_queue.comp_ring.intr_idx = 0;
- adapter->rx_queue.comp_ring.intr_idx = 0;
- adapter->intr.event_intr_idx = 0;
+ /* init our intr settings */
+ for (i = 0; i < intr->num_intrs; i++)
+ intr->mod_levels[i] = UPT1_IML_ADAPTIVE;
+ if (adapter->intr.type != VMXNET3_IT_MSIX) {
+ adapter->intr.event_intr_idx = 0;
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ adapter->tx_queue[i].comp_ring.intr_idx = 0;
+ adapter->rx_queue[0].comp_ring.intr_idx = 0;
+ }
printk(KERN_INFO "%s: intr type %u, mode %u, %u vectors "
- "allocated\n", adapter->netdev->name, adapter->intr.type,
- adapter->intr.mask_mode, adapter->intr.num_intrs);
+ "allocated\n", adapter->netdev->name, intr->type,
+ intr->mask_mode, intr->num_intrs);
}
return err;
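Reviewer note: the vector layout built above is easier to see in isolation. With no sharing it is [tx0..txN-1][rx0..rxM-1][event]; VMXNET3_INTR_TXSHARE folds every tx queue onto one vector; VMXNET3_INTR_BUDDYSHARE resets the counter before the rx loop so rx queue i reuses tx queue i's vector (hence the "%s-rxtx-%d" irq names). A user-space model of the assignment:

	#include <stdio.h>

	enum share_mode { NOSHARE, TXSHARE, BUDDYSHARE }; /* models share_intr */

	static int assign_vectors(enum share_mode mode, int ntx, int nrx)
	{
		int vector = 0, i;

		for (i = 0; i < ntx; i++) {
			if (mode == TXSHARE) {	/* all tx queues on one vector */
				printf("  tx[0..%d] -> vector %d\n",
				       ntx - 1, vector++);
				break;
			}
			printf("  tx[%d] -> vector %d\n", i, vector++);
		}
		if (mode == BUDDYSHARE)		/* rx[i] reuses tx[i]'s vector */
			vector = 0;
		for (i = 0; i < nrx; i++)
			printf("  rx[%d] -> vector %d\n", i, vector++);
		printf("  event -> vector %d\n", vector);
		return vector + 1;		/* == intr->num_intrs */
	}

	int main(void)
	{
		printf("no sharing: %d vectors\n\n",
		       assign_vectors(NOSHARE, 2, 2));
		printf("buddy sharing: %d vectors\n",
		       assign_vectors(BUDDYSHARE, 2, 2));
		return 0;
	}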
@@ -1522,18 +1809,32 @@ vmxnet3_request_irqs(struct vmxnet3_adapter *adapter)
static void
vmxnet3_free_irqs(struct vmxnet3_adapter *adapter)
{
- BUG_ON(adapter->intr.type == VMXNET3_IT_AUTO ||
- adapter->intr.num_intrs <= 0);
+ struct vmxnet3_intr *intr = &adapter->intr;
+ BUG_ON(intr->type == VMXNET3_IT_AUTO || intr->num_intrs <= 0);
- switch (adapter->intr.type) {
+ switch (intr->type) {
#ifdef CONFIG_PCI_MSI
case VMXNET3_IT_MSIX:
{
- int i;
+ int i, vector = 0;
- for (i = 0; i < adapter->intr.num_intrs; i++)
- free_irq(adapter->intr.msix_entries[i].vector,
- adapter->netdev);
+ if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) {
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ free_irq(intr->msix_entries[vector++].vector,
+ &(adapter->tx_queue[i]));
+ if (adapter->share_intr == VMXNET3_INTR_TXSHARE)
+ break;
+ }
+ }
+
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ free_irq(intr->msix_entries[vector++].vector,
+ &(adapter->rx_queue[i]));
+ }
+
+ free_irq(intr->msix_entries[vector].vector,
+ adapter->netdev);
+ BUG_ON(vector >= intr->num_intrs);
break;
}
#endif
@@ -1727,6 +2028,15 @@ vmxnet3_set_mc(struct net_device *netdev)
kfree(new_table);
}
+void
+vmxnet3_rq_destroy_all(struct vmxnet3_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ vmxnet3_rq_destroy(&adapter->rx_queue[i], adapter);
+}
+
/*
* Set up driver_shared based on settings in adapter.
@@ -1774,40 +2084,72 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
devRead->misc.mtu = cpu_to_le32(adapter->netdev->mtu);
devRead->misc.queueDescPA = cpu_to_le64(adapter->queue_desc_pa);
devRead->misc.queueDescLen = cpu_to_le32(
- sizeof(struct Vmxnet3_TxQueueDesc) +
- sizeof(struct Vmxnet3_RxQueueDesc));
+ adapter->num_tx_queues * sizeof(struct Vmxnet3_TxQueueDesc) +
+ adapter->num_rx_queues * sizeof(struct Vmxnet3_RxQueueDesc));
/* tx queue settings */
- BUG_ON(adapter->tx_queue.tx_ring.base == NULL);
-
- devRead->misc.numTxQueues = 1;
- tqc = &adapter->tqd_start->conf;
- tqc->txRingBasePA = cpu_to_le64(adapter->tx_queue.tx_ring.basePA);
- tqc->dataRingBasePA = cpu_to_le64(adapter->tx_queue.data_ring.basePA);
- tqc->compRingBasePA = cpu_to_le64(adapter->tx_queue.comp_ring.basePA);
- tqc->ddPA = cpu_to_le64(virt_to_phys(
- adapter->tx_queue.buf_info));
- tqc->txRingSize = cpu_to_le32(adapter->tx_queue.tx_ring.size);
- tqc->dataRingSize = cpu_to_le32(adapter->tx_queue.data_ring.size);
- tqc->compRingSize = cpu_to_le32(adapter->tx_queue.comp_ring.size);
- tqc->ddLen = cpu_to_le32(sizeof(struct vmxnet3_tx_buf_info) *
- tqc->txRingSize);
- tqc->intrIdx = adapter->tx_queue.comp_ring.intr_idx;
+ devRead->misc.numTxQueues = adapter->num_tx_queues;
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ struct vmxnet3_tx_queue *tq = &adapter->tx_queue[i];
+ BUG_ON(adapter->tx_queue[i].tx_ring.base == NULL);
+ tqc = &adapter->tqd_start[i].conf;
+ tqc->txRingBasePA = cpu_to_le64(tq->tx_ring.basePA);
+ tqc->dataRingBasePA = cpu_to_le64(tq->data_ring.basePA);
+ tqc->compRingBasePA = cpu_to_le64(tq->comp_ring.basePA);
+ tqc->ddPA = cpu_to_le64(virt_to_phys(tq->buf_info));
+ tqc->txRingSize = cpu_to_le32(tq->tx_ring.size);
+ tqc->dataRingSize = cpu_to_le32(tq->data_ring.size);
+ tqc->compRingSize = cpu_to_le32(tq->comp_ring.size);
+ tqc->ddLen = cpu_to_le32(
+ sizeof(struct vmxnet3_tx_buf_info) *
+ tqc->txRingSize);
+ tqc->intrIdx = tq->comp_ring.intr_idx;
+ }
/* rx queue settings */
- devRead->misc.numRxQueues = 1;
- rqc = &adapter->rqd_start->conf;
- rqc->rxRingBasePA[0] = cpu_to_le64(adapter->rx_queue.rx_ring[0].basePA);
- rqc->rxRingBasePA[1] = cpu_to_le64(adapter->rx_queue.rx_ring[1].basePA);
- rqc->compRingBasePA = cpu_to_le64(adapter->rx_queue.comp_ring.basePA);
- rqc->ddPA = cpu_to_le64(virt_to_phys(
- adapter->rx_queue.buf_info));
- rqc->rxRingSize[0] = cpu_to_le32(adapter->rx_queue.rx_ring[0].size);
- rqc->rxRingSize[1] = cpu_to_le32(adapter->rx_queue.rx_ring[1].size);
- rqc->compRingSize = cpu_to_le32(adapter->rx_queue.comp_ring.size);
- rqc->ddLen = cpu_to_le32(sizeof(struct vmxnet3_rx_buf_info) *
- (rqc->rxRingSize[0] + rqc->rxRingSize[1]));
- rqc->intrIdx = adapter->rx_queue.comp_ring.intr_idx;
+ devRead->misc.numRxQueues = adapter->num_rx_queues;
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
+ rqc = &adapter->rqd_start[i].conf;
+ rqc->rxRingBasePA[0] = cpu_to_le64(rq->rx_ring[0].basePA);
+ rqc->rxRingBasePA[1] = cpu_to_le64(rq->rx_ring[1].basePA);
+ rqc->compRingBasePA = cpu_to_le64(rq->comp_ring.basePA);
+ rqc->ddPA = cpu_to_le64(virt_to_phys(
+ rq->buf_info));
+ rqc->rxRingSize[0] = cpu_to_le32(rq->rx_ring[0].size);
+ rqc->rxRingSize[1] = cpu_to_le32(rq->rx_ring[1].size);
+ rqc->compRingSize = cpu_to_le32(rq->comp_ring.size);
+ rqc->ddLen = cpu_to_le32(
+ sizeof(struct vmxnet3_rx_buf_info) *
+ (rqc->rxRingSize[0] +
+ rqc->rxRingSize[1]));
+ rqc->intrIdx = rq->comp_ring.intr_idx;
+ }
+
+#ifdef VMXNET3_RSS
+ memset(adapter->rss_conf, 0, sizeof(*adapter->rss_conf));
+
+ if (adapter->rss) {
+ struct UPT1_RSSConf *rssConf = adapter->rss_conf;
+ devRead->misc.uptFeatures |= UPT1_F_RSS;
+ devRead->misc.numRxQueues = adapter->num_rx_queues;
+ rssConf->hashType = UPT1_RSS_HASH_TYPE_TCP_IPV4 |
+ UPT1_RSS_HASH_TYPE_IPV4 |
+ UPT1_RSS_HASH_TYPE_TCP_IPV6 |
+ UPT1_RSS_HASH_TYPE_IPV6;
+ rssConf->hashFunc = UPT1_RSS_HASH_FUNC_TOEPLITZ;
+ rssConf->hashKeySize = UPT1_RSS_MAX_KEY_SIZE;
+ rssConf->indTableSize = VMXNET3_RSS_IND_TABLE_SIZE;
+ get_random_bytes(&rssConf->hashKey[0], rssConf->hashKeySize);
+ for (i = 0; i < rssConf->indTableSize; i++)
+ rssConf->indTable[i] = i % adapter->num_rx_queues;
+
+ devRead->rssConfDesc.confVer = 1;
+ devRead->rssConfDesc.confLen = sizeof(*rssConf);
+ devRead->rssConfDesc.confPA = virt_to_phys(rssConf);
+ }
+
+#endif /* VMXNET3_RSS */
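Reviewer note: the RSS block programs a Toeplitz hash with a random key and a flat indirection table (indTable[i] = i % num_rx_queues), spreading flows round-robin over the rx queues; the device indexes the table with the packet's hash to pick a queue. The selection math in isolation (the hash value is stubbed — a real Toeplitz computation would supply it):

	#include <stdio.h>

	#define IND_TABLE_SIZE 32 /* stands in for VMXNET3_RSS_IND_TABLE_SIZE */

	int main(void)
	{
		unsigned char ind[IND_TABLE_SIZE];
		unsigned int num_rx_queues = 4, i;
		unsigned int hash = 0x9e3779b9;	/* stubbed Toeplitz hash */

		for (i = 0; i < IND_TABLE_SIZE; i++)	/* as in the hunk above */
			ind[i] = i % num_rx_queues;

		printf("hash 0x%x -> rx queue %u\n",
		       hash, (unsigned)ind[hash % IND_TABLE_SIZE]);
		return 0;
	}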
/* intr settings */
devRead->intrConf.autoMask = adapter->intr.mask_mode ==
@@ -1829,18 +2171,18 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
int
vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
{
- int err;
+ int err, i;
u32 ret;
- dev_dbg(&adapter->netdev->dev,
- "%s: skb_buf_size %d, rx_buf_per_pkt %d, ring sizes"
- " %u %u %u\n", adapter->netdev->name, adapter->skb_buf_size,
- adapter->rx_buf_per_pkt, adapter->tx_queue.tx_ring.size,
- adapter->rx_queue.rx_ring[0].size,
- adapter->rx_queue.rx_ring[1].size);
-
- vmxnet3_tq_init(&adapter->tx_queue, adapter);
- err = vmxnet3_rq_init(&adapter->rx_queue, adapter);
+ dev_dbg(&adapter->netdev->dev, "%s: skb_buf_size %d, rx_buf_per_pkt %d,"
+ " ring sizes %u %u %u\n", adapter->netdev->name,
+ adapter->skb_buf_size, adapter->rx_buf_per_pkt,
+ adapter->tx_queue[0].tx_ring.size,
+ adapter->rx_queue[0].rx_ring[0].size,
+ adapter->rx_queue[0].rx_ring[1].size);
+
+ vmxnet3_tq_init_all(adapter);
+ err = vmxnet3_rq_init_all(adapter);
if (err) {
printk(KERN_ERR "Failed to init rx queue for %s: error %d\n",
adapter->netdev->name, err);
@@ -1870,10 +2212,15 @@ vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
err = -EINVAL;
goto activate_err;
}
- VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_RXPROD,
- adapter->rx_queue.rx_ring[0].next2fill);
- VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_RXPROD2,
- adapter->rx_queue.rx_ring[1].next2fill);
+
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ VMXNET3_WRITE_BAR0_REG(adapter,
+ VMXNET3_REG_RXPROD + i * VMXNET3_REG_ALIGN,
+ adapter->rx_queue[i].rx_ring[0].next2fill);
+ VMXNET3_WRITE_BAR0_REG(adapter, (VMXNET3_REG_RXPROD2 +
+ (i * VMXNET3_REG_ALIGN)),
+ adapter->rx_queue[i].rx_ring[1].next2fill);
+ }
/* Apply the rx filter settings last. */
vmxnet3_set_mc(adapter->netdev);
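Reviewer note: in the producer-update loop above, each rx queue gets its own registers at a fixed stride from the base — address = VMXNET3_REG_RXPROD + i * VMXNET3_REG_ALIGN (and likewise RXPROD2) — the usual per-queue BAR layout. A tiny model; the offsets below are illustrative, not the device's real values:

	#include <stdio.h>

	#define REG_RXPROD 0x800	/* illustrative base offset */
	#define REG_ALIGN  8		/* stands in for VMXNET3_REG_ALIGN */

	int main(void)
	{
		unsigned int i;

		for (i = 0; i < 4; i++)
			printf("rx queue %u: RXPROD at 0x%x\n",
			       i, REG_RXPROD + i * REG_ALIGN);
		return 0;
	}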
@@ -1883,8 +2230,8 @@ vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
* tx queue if the link is up.
*/
vmxnet3_check_link(adapter, true);
-
- napi_enable(&adapter->napi);
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ napi_enable(&adapter->rx_queue[i].napi);
vmxnet3_enable_all_intrs(adapter);
clear_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
return 0;
@@ -1896,7 +2243,7 @@ activate_err:
irq_err:
rq_err:
/* free up buffers we allocated */
- vmxnet3_rq_cleanup(&adapter->rx_queue, adapter);
+ vmxnet3_rq_cleanup_all(adapter);
return err;
}
@@ -1911,6 +2258,7 @@ vmxnet3_reset_dev(struct vmxnet3_adapter *adapter)
int
vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter)
{
+ int i;
if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state))
return 0;
@@ -1919,13 +2267,14 @@ vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter)
VMXNET3_CMD_QUIESCE_DEV);
vmxnet3_disable_all_intrs(adapter);
- napi_disable(&adapter->napi);
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ napi_disable(&adapter->rx_queue[i].napi);
netif_tx_disable(adapter->netdev);
adapter->link_speed = 0;
netif_carrier_off(adapter->netdev);
- vmxnet3_tq_cleanup(&adapter->tx_queue, adapter);
- vmxnet3_rq_cleanup(&adapter->rx_queue, adapter);
+ vmxnet3_tq_cleanup_all(adapter);
+ vmxnet3_rq_cleanup_all(adapter);
vmxnet3_free_irqs(adapter);
return 0;
}
@@ -2047,7 +2396,9 @@ vmxnet3_free_pci_resources(struct vmxnet3_adapter *adapter)
static void
vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter)
{
- size_t sz;
+ size_t sz, i, ring0_size, ring1_size, comp_size;
+ struct vmxnet3_rx_queue *rq = &adapter->rx_queue[0];
+
if (adapter->netdev->mtu <= VMXNET3_MAX_SKB_BUF_SIZE -
VMXNET3_MAX_ETH_HDR_SIZE) {
@@ -2069,11 +2420,19 @@ vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter)
* rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN
*/
sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN;
- adapter->rx_queue.rx_ring[0].size = (adapter->rx_queue.rx_ring[0].size +
- sz - 1) / sz * sz;
- adapter->rx_queue.rx_ring[0].size = min_t(u32,
- adapter->rx_queue.rx_ring[0].size,
- VMXNET3_RX_RING_MAX_SIZE / sz * sz);
+ ring0_size = adapter->rx_queue[0].rx_ring[0].size;
+ ring0_size = (ring0_size + sz - 1) / sz * sz;
+	ring0_size = min_t(u32, ring0_size, VMXNET3_RX_RING_MAX_SIZE /
+ sz * sz);
+ ring1_size = adapter->rx_queue[0].rx_ring[1].size;
+ comp_size = ring0_size + ring1_size;
+
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ rq = &adapter->rx_queue[i];
+ rq->rx_ring[0].size = ring0_size;
+ rq->rx_ring[1].size = ring1_size;
+ rq->comp_ring.size = comp_size;
+ }
}
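The ring-size arithmetic above is the usual round-up-then-clamp idiom for keeping a ring a whole multiple of the alignment unit. A standalone sketch, with hypothetical names:

static u32 align_ring_size(u32 size, u32 align, u32 max)
{
	size = (size + align - 1) / align * align;   /* round up to a multiple */
	return min(size, max / align * align);       /* clamp, staying a multiple */
}

For example, align_ring_size(1000, 96, 4096) rounds 1000 up to 1056 and leaves it there, since 1056 is below the clamped maximum of 4032.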
@@ -2081,29 +2440,53 @@ int
vmxnet3_create_queues(struct vmxnet3_adapter *adapter, u32 tx_ring_size,
u32 rx_ring_size, u32 rx_ring2_size)
{
- int err;
-
- adapter->tx_queue.tx_ring.size = tx_ring_size;
- adapter->tx_queue.data_ring.size = tx_ring_size;
- adapter->tx_queue.comp_ring.size = tx_ring_size;
- adapter->tx_queue.shared = &adapter->tqd_start->ctrl;
- adapter->tx_queue.stopped = true;
- err = vmxnet3_tq_create(&adapter->tx_queue, adapter);
- if (err)
- return err;
+ int err = 0, i;
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ struct vmxnet3_tx_queue *tq = &adapter->tx_queue[i];
+ tq->tx_ring.size = tx_ring_size;
+ tq->data_ring.size = tx_ring_size;
+ tq->comp_ring.size = tx_ring_size;
+ tq->shared = &adapter->tqd_start[i].ctrl;
+ tq->stopped = true;
+ tq->adapter = adapter;
+ tq->qid = i;
+ err = vmxnet3_tq_create(tq, adapter);
+ /*
+		 * Too late to change num_tx_queues. We cannot make do with
+		 * fewer queues than we asked for.
+ */
+ if (err)
+ goto queue_err;
+ }
- adapter->rx_queue.rx_ring[0].size = rx_ring_size;
- adapter->rx_queue.rx_ring[1].size = rx_ring2_size;
+ adapter->rx_queue[0].rx_ring[0].size = rx_ring_size;
+ adapter->rx_queue[0].rx_ring[1].size = rx_ring2_size;
vmxnet3_adjust_rx_ring_size(adapter);
- adapter->rx_queue.comp_ring.size = adapter->rx_queue.rx_ring[0].size +
- adapter->rx_queue.rx_ring[1].size;
- adapter->rx_queue.qid = 0;
- adapter->rx_queue.qid2 = 1;
- adapter->rx_queue.shared = &adapter->rqd_start->ctrl;
- err = vmxnet3_rq_create(&adapter->rx_queue, adapter);
- if (err)
- vmxnet3_tq_destroy(&adapter->tx_queue, adapter);
-
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
+		/* qid and qid2 for rx queues will be assigned later, when the
+		 * number of rx queues is finalized after allocating intrs */
+ rq->shared = &adapter->rqd_start[i].ctrl;
+ rq->adapter = adapter;
+ err = vmxnet3_rq_create(rq, adapter);
+ if (err) {
+ if (i == 0) {
+				printk(KERN_ERR "Could not allocate any rx "
+				       "queues. Aborting.\n");
+ goto queue_err;
+ } else {
+				printk(KERN_INFO "Number of rx queues changed "
+				       "to: %d.\n", i);
+ adapter->num_rx_queues = i;
+ err = 0;
+ break;
+ }
+ }
+ }
+ return err;
+queue_err:
+ vmxnet3_tq_destroy_all(adapter);
return err;
}
@@ -2111,11 +2494,12 @@ static int
vmxnet3_open(struct net_device *netdev)
{
struct vmxnet3_adapter *adapter;
- int err;
+ int err, i;
adapter = netdev_priv(netdev);
- spin_lock_init(&adapter->tx_queue.tx_lock);
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ spin_lock_init(&adapter->tx_queue[i].tx_lock);
err = vmxnet3_create_queues(adapter, VMXNET3_DEF_TX_RING_SIZE,
VMXNET3_DEF_RX_RING_SIZE,
@@ -2130,8 +2514,8 @@ vmxnet3_open(struct net_device *netdev)
return 0;
activate_err:
- vmxnet3_rq_destroy(&adapter->rx_queue, adapter);
- vmxnet3_tq_destroy(&adapter->tx_queue, adapter);
+ vmxnet3_rq_destroy_all(adapter);
+ vmxnet3_tq_destroy_all(adapter);
queue_err:
return err;
}
@@ -2151,8 +2535,8 @@ vmxnet3_close(struct net_device *netdev)
vmxnet3_quiesce_dev(adapter);
- vmxnet3_rq_destroy(&adapter->rx_queue, adapter);
- vmxnet3_tq_destroy(&adapter->tx_queue, adapter);
+ vmxnet3_rq_destroy_all(adapter);
+ vmxnet3_tq_destroy_all(adapter);
clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
@@ -2164,6 +2548,8 @@ vmxnet3_close(struct net_device *netdev)
void
vmxnet3_force_close(struct vmxnet3_adapter *adapter)
{
+ int i;
+
/*
* we must clear VMXNET3_STATE_BIT_RESETTING, otherwise
* vmxnet3_close() will deadlock.
@@ -2171,7 +2557,8 @@ vmxnet3_force_close(struct vmxnet3_adapter *adapter)
BUG_ON(test_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state));
/* we need to enable NAPI, otherwise dev_close will deadlock */
- napi_enable(&adapter->napi);
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ napi_enable(&adapter->rx_queue[i].napi);
dev_close(adapter->netdev);
}
@@ -2202,14 +2589,11 @@ vmxnet3_change_mtu(struct net_device *netdev, int new_mtu)
vmxnet3_reset_dev(adapter);
/* we need to re-create the rx queue based on the new mtu */
- vmxnet3_rq_destroy(&adapter->rx_queue, adapter);
+ vmxnet3_rq_destroy_all(adapter);
vmxnet3_adjust_rx_ring_size(adapter);
- adapter->rx_queue.comp_ring.size =
- adapter->rx_queue.rx_ring[0].size +
- adapter->rx_queue.rx_ring[1].size;
- err = vmxnet3_rq_create(&adapter->rx_queue, adapter);
+ err = vmxnet3_rq_create_all(adapter);
if (err) {
- printk(KERN_ERR "%s: failed to re-create rx queue,"
+ printk(KERN_ERR "%s: failed to re-create rx queues,"
" error %d. Closing it.\n", netdev->name, err);
goto out;
}
@@ -2274,6 +2658,55 @@ vmxnet3_read_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac)
mac[5] = (tmp >> 8) & 0xff;
}
+#ifdef CONFIG_PCI_MSI
+
+/*
+ * Enable MSI-X vectors.
+ * Returns:
+ * 0 on successful enabling of required vectors,
+ * VMXNET3_LINUX_MIN_MSIX_VECT when only the minimum number of required
+ * vectors could be enabled,
+ * otherwise the number of vectors which can be enabled (this number is
+ * smaller than VMXNET3_LINUX_MIN_MSIX_VECT).
+ */
+
+static int
+vmxnet3_acquire_msix_vectors(struct vmxnet3_adapter *adapter,
+ int vectors)
+{
+ int err = 0, vector_threshold;
+ vector_threshold = VMXNET3_LINUX_MIN_MSIX_VECT;
+
+ while (vectors >= vector_threshold) {
+ err = pci_enable_msix(adapter->pdev, adapter->intr.msix_entries,
+ vectors);
+ if (!err) {
+ adapter->intr.num_intrs = vectors;
+ return 0;
+ } else if (err < 0) {
+ printk(KERN_ERR "Failed to enable MSI-X for %s, error"
+ " %d\n", adapter->netdev->name, err);
+ vectors = 0;
+ } else if (err < vector_threshold) {
+ break;
+		} else {
+			/* If it fails to enable the required number of MSI-X
+			 * vectors, try enabling 3 of them: one each for rx,
+			 * tx and event.
+			 */
+			printk(KERN_ERR "Failed to enable %d MSI-X for %s, try"
+			       " %d instead\n", vectors, adapter->netdev->name,
+			       vector_threshold);
+			vectors = vector_threshold;
+		}
+ }
+
+	printk(KERN_INFO "Number of MSI-X interrupts which can be allocated"
+	       " is lower than the minimum threshold required.\n");
+ return err;
+}
+
+
+#endif /* CONFIG_PCI_MSI */
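The negotiation above relies on the tri-state return of pci_enable_msix() in this kernel generation: a negative value is a hard failure, zero means all requested vectors were enabled, and a positive value reports how many vectors are actually available. A condensed sketch of the same retry pattern, with hypothetical names:

static int try_enable_msix(struct pci_dev *pdev, struct msix_entry *entries,
			   int nvec, int min_vec)
{
	while (nvec >= min_vec) {
		int ret = pci_enable_msix(pdev, entries, nvec);
		if (ret == 0)
			return nvec;	/* all requested vectors enabled */
		if (ret < 0)
			return ret;	/* hard failure, MSI-X unusable */
		nvec = ret;		/* retry with what the device offers */
	}
	return -ENOSPC;			/* cannot meet the minimum */
}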
static void
vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
@@ -2293,16 +2726,47 @@ vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
#ifdef CONFIG_PCI_MSI
if (adapter->intr.type == VMXNET3_IT_MSIX) {
- int err;
-
- adapter->intr.msix_entries[0].entry = 0;
- err = pci_enable_msix(adapter->pdev, adapter->intr.msix_entries,
- VMXNET3_LINUX_MAX_MSIX_VECT);
- if (!err) {
- adapter->intr.num_intrs = 1;
- adapter->intr.type = VMXNET3_IT_MSIX;
+ int vector, err = 0;
+
+ adapter->intr.num_intrs = (adapter->share_intr ==
+ VMXNET3_INTR_TXSHARE) ? 1 :
+ adapter->num_tx_queues;
+ adapter->intr.num_intrs += (adapter->share_intr ==
+ VMXNET3_INTR_BUDDYSHARE) ? 0 :
+ adapter->num_rx_queues;
+ adapter->intr.num_intrs += 1; /* for link event */
+
+ adapter->intr.num_intrs = (adapter->intr.num_intrs >
+ VMXNET3_LINUX_MIN_MSIX_VECT
+ ? adapter->intr.num_intrs :
+ VMXNET3_LINUX_MIN_MSIX_VECT);
+
+ for (vector = 0; vector < adapter->intr.num_intrs; vector++)
+ adapter->intr.msix_entries[vector].entry = vector;
+
+ err = vmxnet3_acquire_msix_vectors(adapter,
+ adapter->intr.num_intrs);
+		/* If we cannot allocate one MSI-X vector per queue
+		 * then limit the number of rx queues to 1
+		 */
+ if (err == VMXNET3_LINUX_MIN_MSIX_VECT) {
+ if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE
+ || adapter->num_rx_queues != 2) {
+ adapter->share_intr = VMXNET3_INTR_TXSHARE;
+ printk(KERN_ERR "Number of rx queues : 1\n");
+ adapter->num_rx_queues = 1;
+ adapter->intr.num_intrs =
+ VMXNET3_LINUX_MIN_MSIX_VECT;
+ }
return;
}
+ if (!err)
+ return;
+
+		/* If we cannot allocate MSI-X vectors use only one rx queue */
+		printk(KERN_INFO "Failed to enable MSI-X for %s, error %d. "
+		       "#rx queues : 1, try MSI\n", adapter->netdev->name, err);
+
adapter->intr.type = VMXNET3_IT_MSI;
}
@@ -2310,12 +2774,15 @@ vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
int err;
err = pci_enable_msi(adapter->pdev);
if (!err) {
+ adapter->num_rx_queues = 1;
adapter->intr.num_intrs = 1;
return;
}
}
#endif /* CONFIG_PCI_MSI */
+ adapter->num_rx_queues = 1;
+ printk(KERN_INFO "Using INTx interrupt, #Rx queues: 1.\n");
adapter->intr.type = VMXNET3_IT_INTX;
/* INT-X related setting */
@@ -2343,6 +2810,7 @@ vmxnet3_tx_timeout(struct net_device *netdev)
printk(KERN_ERR "%s: tx hang\n", adapter->netdev->name);
schedule_work(&adapter->work);
+ netif_wake_queue(adapter->netdev);
}
@@ -2399,8 +2867,29 @@ vmxnet3_probe_device(struct pci_dev *pdev,
struct net_device *netdev;
struct vmxnet3_adapter *adapter;
u8 mac[ETH_ALEN];
+ int size;
+ int num_tx_queues;
+ int num_rx_queues;
+
+#ifdef VMXNET3_RSS
+ if (enable_mq)
+ num_rx_queues = min(VMXNET3_DEVICE_MAX_RX_QUEUES,
+ (int)num_online_cpus());
+ else
+#endif
+ num_rx_queues = 1;
+
+ if (enable_mq)
+ num_tx_queues = min(VMXNET3_DEVICE_MAX_TX_QUEUES,
+ (int)num_online_cpus());
+ else
+ num_tx_queues = 1;
+
+ netdev = alloc_etherdev_mq(sizeof(struct vmxnet3_adapter),
+ max(num_tx_queues, num_rx_queues));
+ printk(KERN_INFO "# of Tx queues : %d, # of Rx queues : %d\n",
+ num_tx_queues, num_rx_queues);
- netdev = alloc_etherdev(sizeof(struct vmxnet3_adapter));
if (!netdev) {
printk(KERN_ERR "Failed to alloc ethernet device for adapter "
"%s\n", pci_name(pdev));
@@ -2422,9 +2911,12 @@ vmxnet3_probe_device(struct pci_dev *pdev,
goto err_alloc_shared;
}
- adapter->tqd_start = pci_alloc_consistent(adapter->pdev,
- sizeof(struct Vmxnet3_TxQueueDesc) +
- sizeof(struct Vmxnet3_RxQueueDesc),
+ adapter->num_rx_queues = num_rx_queues;
+ adapter->num_tx_queues = num_tx_queues;
+
+ size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
+ size += sizeof(struct Vmxnet3_RxQueueDesc) * adapter->num_rx_queues;
+ adapter->tqd_start = pci_alloc_consistent(adapter->pdev, size,
&adapter->queue_desc_pa);
if (!adapter->tqd_start) {
@@ -2433,8 +2925,8 @@ vmxnet3_probe_device(struct pci_dev *pdev,
err = -ENOMEM;
goto err_alloc_queue_desc;
}
- adapter->rqd_start = (struct Vmxnet3_RxQueueDesc *)(adapter->tqd_start
- + 1);
+ adapter->rqd_start = (struct Vmxnet3_RxQueueDesc *)(adapter->tqd_start +
+ adapter->num_tx_queues);
adapter->pm_conf = kmalloc(sizeof(struct Vmxnet3_PMConf), GFP_KERNEL);
if (adapter->pm_conf == NULL) {
@@ -2444,6 +2936,17 @@ vmxnet3_probe_device(struct pci_dev *pdev,
goto err_alloc_pm;
}
+#ifdef VMXNET3_RSS
+
+ adapter->rss_conf = kmalloc(sizeof(struct UPT1_RSSConf), GFP_KERNEL);
+ if (adapter->rss_conf == NULL) {
+ printk(KERN_ERR "Failed to allocate memory for %s\n",
+ pci_name(pdev));
+ err = -ENOMEM;
+ goto err_alloc_rss;
+ }
+#endif /* VMXNET3_RSS */
+
err = vmxnet3_alloc_pci_resources(adapter, &dma64);
if (err < 0)
goto err_alloc_pci;
@@ -2471,18 +2974,48 @@ vmxnet3_probe_device(struct pci_dev *pdev,
vmxnet3_declare_features(adapter, dma64);
adapter->dev_number = atomic_read(&devices_found);
+
+ adapter->share_intr = irq_share_mode;
+ if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE &&
+ adapter->num_tx_queues != adapter->num_rx_queues)
+ adapter->share_intr = VMXNET3_INTR_DONTSHARE;
+
vmxnet3_alloc_intr_resources(adapter);
+#ifdef VMXNET3_RSS
+ if (adapter->num_rx_queues > 1 &&
+ adapter->intr.type == VMXNET3_IT_MSIX) {
+ adapter->rss = true;
+ printk(KERN_INFO "RSS is enabled.\n");
+ } else {
+ adapter->rss = false;
+ }
+#endif
+
vmxnet3_read_mac_addr(adapter, mac);
memcpy(netdev->dev_addr, mac, netdev->addr_len);
netdev->netdev_ops = &vmxnet3_netdev_ops;
- netdev->watchdog_timeo = 5 * HZ;
vmxnet3_set_ethtool_ops(netdev);
+ netdev->watchdog_timeo = 5 * HZ;
INIT_WORK(&adapter->work, vmxnet3_reset_work);
- netif_napi_add(netdev, &adapter->napi, vmxnet3_poll, 64);
+ if (adapter->intr.type == VMXNET3_IT_MSIX) {
+ int i;
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ netif_napi_add(adapter->netdev,
+ &adapter->rx_queue[i].napi,
+ vmxnet3_poll_rx_only, 64);
+ }
+ } else {
+ netif_napi_add(adapter->netdev, &adapter->rx_queue[0].napi,
+ vmxnet3_poll, 64);
+ }
+
+ netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
+ netif_set_real_num_rx_queues(adapter->netdev, adapter->num_rx_queues);
+
SET_NETDEV_DEV(netdev, &pdev->dev);
err = register_netdev(netdev);
@@ -2502,11 +3035,14 @@ err_register:
err_ver:
vmxnet3_free_pci_resources(adapter);
err_alloc_pci:
+#ifdef VMXNET3_RSS
+ kfree(adapter->rss_conf);
+err_alloc_rss:
+#endif
kfree(adapter->pm_conf);
err_alloc_pm:
- pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_TxQueueDesc) +
- sizeof(struct Vmxnet3_RxQueueDesc),
- adapter->tqd_start, adapter->queue_desc_pa);
+ pci_free_consistent(adapter->pdev, size, adapter->tqd_start,
+ adapter->queue_desc_pa);
err_alloc_queue_desc:
pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_DriverShared),
adapter->shared, adapter->shared_pa);
@@ -2522,6 +3058,16 @@ vmxnet3_remove_device(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+ int size = 0;
+ int num_rx_queues;
+
+#ifdef VMXNET3_RSS
+ if (enable_mq)
+ num_rx_queues = min(VMXNET3_DEVICE_MAX_RX_QUEUES,
+ (int)num_online_cpus());
+ else
+#endif
+ num_rx_queues = 1;
flush_scheduled_work();
@@ -2529,10 +3075,15 @@ vmxnet3_remove_device(struct pci_dev *pdev)
vmxnet3_free_intr_resources(adapter);
vmxnet3_free_pci_resources(adapter);
+#ifdef VMXNET3_RSS
+ kfree(adapter->rss_conf);
+#endif
kfree(adapter->pm_conf);
- pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_TxQueueDesc) +
- sizeof(struct Vmxnet3_RxQueueDesc),
- adapter->tqd_start, adapter->queue_desc_pa);
+
+ size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
+ size += sizeof(struct Vmxnet3_RxQueueDesc) * num_rx_queues;
+ pci_free_consistent(adapter->pdev, size, adapter->tqd_start,
+ adapter->queue_desc_pa);
pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_DriverShared),
adapter->shared, adapter->shared_pa);
free_netdev(netdev);
@@ -2563,7 +3114,7 @@ vmxnet3_suspend(struct device *device)
vmxnet3_free_intr_resources(adapter);
netif_device_detach(netdev);
- netif_stop_queue(netdev);
+ netif_tx_stop_all_queues(netdev);
/* Create wake-up filters. */
pmConf = adapter->pm_conf;
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
index b79070bcc92e..9ddaea636cfa 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
@@ -151,44 +151,42 @@ vmxnet3_get_stats(struct net_device *netdev)
struct UPT1_TxStats *devTxStats;
struct UPT1_RxStats *devRxStats;
struct net_device_stats *net_stats = &netdev->stats;
+ int i;
adapter = netdev_priv(netdev);
/* Collect the dev stats into the shared area */
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
- /* Assuming that we have a single queue device */
- devTxStats = &adapter->tqd_start->stats;
- devRxStats = &adapter->rqd_start->stats;
-
- /* Get access to the driver stats per queue */
- drvTxStats = &adapter->tx_queue.stats;
- drvRxStats = &adapter->rx_queue.stats;
-
memset(net_stats, 0, sizeof(*net_stats));
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ devTxStats = &adapter->tqd_start[i].stats;
+ drvTxStats = &adapter->tx_queue[i].stats;
+ net_stats->tx_packets += devTxStats->ucastPktsTxOK +
+ devTxStats->mcastPktsTxOK +
+ devTxStats->bcastPktsTxOK;
+ net_stats->tx_bytes += devTxStats->ucastBytesTxOK +
+ devTxStats->mcastBytesTxOK +
+ devTxStats->bcastBytesTxOK;
+ net_stats->tx_errors += devTxStats->pktsTxError;
+ net_stats->tx_dropped += drvTxStats->drop_total;
+ }
- net_stats->rx_packets = devRxStats->ucastPktsRxOK +
- devRxStats->mcastPktsRxOK +
- devRxStats->bcastPktsRxOK;
-
- net_stats->tx_packets = devTxStats->ucastPktsTxOK +
- devTxStats->mcastPktsTxOK +
- devTxStats->bcastPktsTxOK;
-
- net_stats->rx_bytes = devRxStats->ucastBytesRxOK +
- devRxStats->mcastBytesRxOK +
- devRxStats->bcastBytesRxOK;
-
- net_stats->tx_bytes = devTxStats->ucastBytesTxOK +
- devTxStats->mcastBytesTxOK +
- devTxStats->bcastBytesTxOK;
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ devRxStats = &adapter->rqd_start[i].stats;
+ drvRxStats = &adapter->rx_queue[i].stats;
+ net_stats->rx_packets += devRxStats->ucastPktsRxOK +
+ devRxStats->mcastPktsRxOK +
+ devRxStats->bcastPktsRxOK;
- net_stats->rx_errors = devRxStats->pktsRxError;
- net_stats->tx_errors = devTxStats->pktsTxError;
- net_stats->rx_dropped = drvRxStats->drop_total;
- net_stats->tx_dropped = drvTxStats->drop_total;
- net_stats->multicast = devRxStats->mcastPktsRxOK;
+ net_stats->rx_bytes += devRxStats->ucastBytesRxOK +
+ devRxStats->mcastBytesRxOK +
+ devRxStats->bcastBytesRxOK;
+ net_stats->rx_errors += devRxStats->pktsRxError;
+ net_stats->rx_dropped += drvRxStats->drop_total;
+ net_stats->multicast += devRxStats->mcastPktsRxOK;
+ }
return net_stats;
}
@@ -307,24 +305,26 @@ vmxnet3_get_ethtool_stats(struct net_device *netdev,
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
u8 *base;
int i;
+ int j = 0;
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
/* this does assume each counter is 64-bit wide */
+/* TODO change this for multiple queues */
- base = (u8 *)&adapter->tqd_start->stats;
+ base = (u8 *)&adapter->tqd_start[j].stats;
for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++)
*buf++ = *(u64 *)(base + vmxnet3_tq_dev_stats[i].offset);
- base = (u8 *)&adapter->tx_queue.stats;
+ base = (u8 *)&adapter->tx_queue[j].stats;
for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats); i++)
*buf++ = *(u64 *)(base + vmxnet3_tq_driver_stats[i].offset);
- base = (u8 *)&adapter->rqd_start->stats;
+ base = (u8 *)&adapter->rqd_start[j].stats;
for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++)
*buf++ = *(u64 *)(base + vmxnet3_rq_dev_stats[i].offset);
- base = (u8 *)&adapter->rx_queue.stats;
+ base = (u8 *)&adapter->rx_queue[j].stats;
for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats); i++)
*buf++ = *(u64 *)(base + vmxnet3_rq_driver_stats[i].offset);
@@ -339,6 +339,7 @@ vmxnet3_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
u32 *buf = p;
+ int i = 0;
memset(p, 0, vmxnet3_get_regs_len(netdev));
@@ -347,28 +348,29 @@ vmxnet3_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
/* Update vmxnet3_get_regs_len if we want to dump more registers */
/* make each ring use multiple of 16 bytes */
- buf[0] = adapter->tx_queue.tx_ring.next2fill;
- buf[1] = adapter->tx_queue.tx_ring.next2comp;
- buf[2] = adapter->tx_queue.tx_ring.gen;
+/* TODO change this for multiple queues */
+ buf[0] = adapter->tx_queue[i].tx_ring.next2fill;
+ buf[1] = adapter->tx_queue[i].tx_ring.next2comp;
+ buf[2] = adapter->tx_queue[i].tx_ring.gen;
buf[3] = 0;
- buf[4] = adapter->tx_queue.comp_ring.next2proc;
- buf[5] = adapter->tx_queue.comp_ring.gen;
- buf[6] = adapter->tx_queue.stopped;
+ buf[4] = adapter->tx_queue[i].comp_ring.next2proc;
+ buf[5] = adapter->tx_queue[i].comp_ring.gen;
+ buf[6] = adapter->tx_queue[i].stopped;
buf[7] = 0;
- buf[8] = adapter->rx_queue.rx_ring[0].next2fill;
- buf[9] = adapter->rx_queue.rx_ring[0].next2comp;
- buf[10] = adapter->rx_queue.rx_ring[0].gen;
+ buf[8] = adapter->rx_queue[i].rx_ring[0].next2fill;
+ buf[9] = adapter->rx_queue[i].rx_ring[0].next2comp;
+ buf[10] = adapter->rx_queue[i].rx_ring[0].gen;
buf[11] = 0;
- buf[12] = adapter->rx_queue.rx_ring[1].next2fill;
- buf[13] = adapter->rx_queue.rx_ring[1].next2comp;
- buf[14] = adapter->rx_queue.rx_ring[1].gen;
+ buf[12] = adapter->rx_queue[i].rx_ring[1].next2fill;
+ buf[13] = adapter->rx_queue[i].rx_ring[1].next2comp;
+ buf[14] = adapter->rx_queue[i].rx_ring[1].gen;
buf[15] = 0;
- buf[16] = adapter->rx_queue.comp_ring.next2proc;
- buf[17] = adapter->rx_queue.comp_ring.gen;
+ buf[16] = adapter->rx_queue[i].comp_ring.next2proc;
+ buf[17] = adapter->rx_queue[i].comp_ring.gen;
buf[18] = 0;
buf[19] = 0;
}
@@ -435,8 +437,10 @@ vmxnet3_get_ringparam(struct net_device *netdev,
param->rx_mini_max_pending = 0;
param->rx_jumbo_max_pending = 0;
- param->rx_pending = adapter->rx_queue.rx_ring[0].size;
- param->tx_pending = adapter->tx_queue.tx_ring.size;
+ param->rx_pending = adapter->rx_queue[0].rx_ring[0].size *
+ adapter->num_rx_queues;
+ param->tx_pending = adapter->tx_queue[0].tx_ring.size *
+ adapter->num_tx_queues;
param->rx_mini_pending = 0;
param->rx_jumbo_pending = 0;
}
@@ -480,8 +484,8 @@ vmxnet3_set_ringparam(struct net_device *netdev,
sz) != 0)
return -EINVAL;
- if (new_tx_ring_size == adapter->tx_queue.tx_ring.size &&
- new_rx_ring_size == adapter->rx_queue.rx_ring[0].size) {
+ if (new_tx_ring_size == adapter->tx_queue[0].tx_ring.size &&
+ new_rx_ring_size == adapter->rx_queue[0].rx_ring[0].size) {
return 0;
}
@@ -498,11 +502,12 @@ vmxnet3_set_ringparam(struct net_device *netdev,
/* recreate the rx queue and the tx queue based on the
* new sizes */
- vmxnet3_tq_destroy(&adapter->tx_queue, adapter);
- vmxnet3_rq_destroy(&adapter->rx_queue, adapter);
+ vmxnet3_tq_destroy_all(adapter);
+ vmxnet3_rq_destroy_all(adapter);
err = vmxnet3_create_queues(adapter, new_tx_ring_size,
new_rx_ring_size, VMXNET3_DEF_RX_RING_SIZE);
+
if (err) {
/* failed, most likely because of OOM, try default
* size */
@@ -535,6 +540,65 @@ out:
}
+static int
+vmxnet3_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info,
+ void *rules)
+{
+ struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+ switch (info->cmd) {
+ case ETHTOOL_GRXRINGS:
+ info->data = adapter->num_rx_queues;
+ return 0;
+ }
+ return -EOPNOTSUPP;
+}
+
+
+static int
+vmxnet3_get_rss_indir(struct net_device *netdev,
+ struct ethtool_rxfh_indir *p)
+{
+ struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+ struct UPT1_RSSConf *rssConf = adapter->rss_conf;
+ unsigned int n = min_t(unsigned int, p->size, rssConf->indTableSize);
+
+ p->size = rssConf->indTableSize;
+ while (n--)
+ p->ring_index[n] = rssConf->indTable[n];
+ return 0;
+
+}
+
+static int
+vmxnet3_set_rss_indir(struct net_device *netdev,
+ const struct ethtool_rxfh_indir *p)
+{
+ unsigned int i;
+ struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+ struct UPT1_RSSConf *rssConf = adapter->rss_conf;
+
+ if (p->size != rssConf->indTableSize)
+ return -EINVAL;
+ for (i = 0; i < rssConf->indTableSize; i++) {
+ /*
+ * Return with error code if any of the queue indices
+ * is out of range
+ */
+		if (p->ring_index[i] >= adapter->num_rx_queues)
+			return -EINVAL;
+ }
+
+ for (i = 0; i < rssConf->indTableSize; i++)
+ rssConf->indTable[i] = p->ring_index[i];
+
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
+ VMXNET3_CMD_UPDATE_RSSIDT);
+
+ return 0;
+
+}
+
static struct ethtool_ops vmxnet3_ethtool_ops = {
.get_settings = vmxnet3_get_settings,
.get_drvinfo = vmxnet3_get_drvinfo,
@@ -558,6 +622,9 @@ static struct ethtool_ops vmxnet3_ethtool_ops = {
.get_ethtool_stats = vmxnet3_get_ethtool_stats,
.get_ringparam = vmxnet3_get_ringparam,
.set_ringparam = vmxnet3_set_ringparam,
+ .get_rxnfc = vmxnet3_get_rxnfc,
+ .get_rxfh_indir = vmxnet3_get_rss_indir,
+ .set_rxfh_indir = vmxnet3_set_rss_indir,
};
void vmxnet3_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index edf228843afc..7fadeed37f03 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -68,11 +68,15 @@
/*
* Version numbers
*/
-#define VMXNET3_DRIVER_VERSION_STRING "1.0.14.0-k"
+#define VMXNET3_DRIVER_VERSION_STRING "1.0.16.0-k"
 /* a 32-bit int, each byte encodes a version number in VMXNET3_DRIVER_VERSION */
-#define VMXNET3_DRIVER_VERSION_NUM 0x01000E00
+#define VMXNET3_DRIVER_VERSION_NUM 0x01001000
+#if defined(CONFIG_PCI_MSI)
+ /* RSS only makes sense if MSI-X is supported. */
+ #define VMXNET3_RSS
+#endif
/*
* Capabilities
@@ -218,16 +222,19 @@ struct vmxnet3_tx_ctx {
};
struct vmxnet3_tx_queue {
+ char name[IFNAMSIZ+8]; /* To identify interrupt */
+ struct vmxnet3_adapter *adapter;
spinlock_t tx_lock;
struct vmxnet3_cmd_ring tx_ring;
- struct vmxnet3_tx_buf_info *buf_info;
+ struct vmxnet3_tx_buf_info *buf_info;
struct vmxnet3_tx_data_ring data_ring;
struct vmxnet3_comp_ring comp_ring;
- struct Vmxnet3_TxQueueCtrl *shared;
+ struct Vmxnet3_TxQueueCtrl *shared;
struct vmxnet3_tq_driver_stats stats;
bool stopped;
int num_stop; /* # of times the queue is
* stopped */
+ int qid;
} __attribute__((__aligned__(SMP_CACHE_BYTES)));
enum vmxnet3_rx_buf_type {
@@ -259,6 +266,9 @@ struct vmxnet3_rq_driver_stats {
};
struct vmxnet3_rx_queue {
+ char name[IFNAMSIZ + 8]; /* To identify interrupt */
+ struct vmxnet3_adapter *adapter;
+ struct napi_struct napi;
struct vmxnet3_cmd_ring rx_ring[2];
struct vmxnet3_comp_ring comp_ring;
struct vmxnet3_rx_ctx rx_ctx;
@@ -271,7 +281,16 @@ struct vmxnet3_rx_queue {
struct vmxnet3_rq_driver_stats stats;
} __attribute__((__aligned__(SMP_CACHE_BYTES)));
-#define VMXNET3_LINUX_MAX_MSIX_VECT 1
+#define VMXNET3_DEVICE_MAX_TX_QUEUES 8
+#define VMXNET3_DEVICE_MAX_RX_QUEUES 8 /* Keep this value as a power of 2 */
+
+/* Should be less than UPT1_RSS_MAX_IND_TABLE_SIZE */
+#define VMXNET3_RSS_IND_TABLE_SIZE (VMXNET3_DEVICE_MAX_RX_QUEUES * 4)
+
+#define VMXNET3_LINUX_MAX_MSIX_VECT (VMXNET3_DEVICE_MAX_TX_QUEUES + \
+ VMXNET3_DEVICE_MAX_RX_QUEUES + 1)
+#define VMXNET3_LINUX_MIN_MSIX_VECT 3 /* 1 for each : tx, rx and event */
+
struct vmxnet3_intr {
enum vmxnet3_intr_mask_mode mask_mode;
@@ -279,27 +298,32 @@ struct vmxnet3_intr {
u8 num_intrs; /* # of intr vectors */
u8 event_intr_idx; /* idx of the intr vector for event */
u8 mod_levels[VMXNET3_LINUX_MAX_MSIX_VECT]; /* moderation level */
+ char event_msi_vector_name[IFNAMSIZ+11];
#ifdef CONFIG_PCI_MSI
struct msix_entry msix_entries[VMXNET3_LINUX_MAX_MSIX_VECT];
#endif
};
+/* Interrupt sharing schemes, share_intr */
+#define VMXNET3_INTR_BUDDYSHARE 0 /* Corresponding tx,rx queues share irq */
+#define VMXNET3_INTR_TXSHARE 1 /* All tx queues share one irq */
+#define VMXNET3_INTR_DONTSHARE 2 /* each queue has its own irq */
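To make the budget concrete: with the vector count formula used in vmxnet3_alloc_intr_resources (tx vectors + rx vectors + 1 for the event interrupt), a hypothetical 4-tx/4-rx adapter would need:

/*
 *   VMXNET3_INTR_DONTSHARE : 4 + 4 + 1 = 9 MSI-X vectors
 *   VMXNET3_INTR_TXSHARE   : 1 + 4 + 1 = 6 MSI-X vectors
 *   VMXNET3_INTR_BUDDYSHARE: 4 + 0 + 1 = 5 MSI-X vectors
 *				(each rx queue rides on its tx queue's vector)
 */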
+
+
#define VMXNET3_STATE_BIT_RESETTING 0
#define VMXNET3_STATE_BIT_QUIESCED 1
struct vmxnet3_adapter {
- struct vmxnet3_tx_queue tx_queue;
- struct vmxnet3_rx_queue rx_queue;
- struct napi_struct napi;
- struct vlan_group *vlan_grp;
-
- struct vmxnet3_intr intr;
-
- struct Vmxnet3_DriverShared *shared;
- struct Vmxnet3_PMConf *pm_conf;
- struct Vmxnet3_TxQueueDesc *tqd_start; /* first tx queue desc */
- struct Vmxnet3_RxQueueDesc *rqd_start; /* first rx queue desc */
- struct net_device *netdev;
- struct pci_dev *pdev;
+ struct vmxnet3_tx_queue tx_queue[VMXNET3_DEVICE_MAX_TX_QUEUES];
+ struct vmxnet3_rx_queue rx_queue[VMXNET3_DEVICE_MAX_RX_QUEUES];
+ struct vlan_group *vlan_grp;
+ struct vmxnet3_intr intr;
+ struct Vmxnet3_DriverShared *shared;
+ struct Vmxnet3_PMConf *pm_conf;
+ struct Vmxnet3_TxQueueDesc *tqd_start; /* all tx queue desc */
+ struct Vmxnet3_RxQueueDesc *rqd_start; /* all rx queue desc */
+ struct net_device *netdev;
+ struct net_device_stats net_stats;
+ struct pci_dev *pdev;
u8 __iomem *hw_addr0; /* for BAR 0 */
u8 __iomem *hw_addr1; /* for BAR 1 */
@@ -308,6 +332,12 @@ struct vmxnet3_adapter {
bool rxcsum;
bool lro;
bool jumbo_frame;
+#ifdef VMXNET3_RSS
+ struct UPT1_RSSConf *rss_conf;
+ bool rss;
+#endif
+ u32 num_rx_queues;
+ u32 num_tx_queues;
/* rx buffer related */
unsigned skb_buf_size;
@@ -327,6 +357,7 @@ struct vmxnet3_adapter {
unsigned long state; /* VMXNET3_STATE_BIT_xxx */
int dev_number;
+ int share_intr;
};
#define VMXNET3_WRITE_BAR0_REG(adapter, reg, val) \
@@ -366,12 +397,10 @@ void
vmxnet3_reset_dev(struct vmxnet3_adapter *adapter);
void
-vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq,
- struct vmxnet3_adapter *adapter);
+vmxnet3_tq_destroy_all(struct vmxnet3_adapter *adapter);
void
-vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
- struct vmxnet3_adapter *adapter);
+vmxnet3_rq_destroy_all(struct vmxnet3_adapter *adapter);
int
vmxnet3_create_queues(struct vmxnet3_adapter *adapter,
diff --git a/drivers/net/vxge/vxge-config.c b/drivers/net/vxge/vxge-config.c
index 906a3ca3676b..409c2e6053d0 100644
--- a/drivers/net/vxge/vxge-config.c
+++ b/drivers/net/vxge/vxge-config.c
@@ -19,19 +19,7 @@
#include "vxge-traffic.h"
#include "vxge-config.h"
-
-static enum vxge_hw_status
-__vxge_hw_fifo_create(
- struct __vxge_hw_vpath_handle *vpath_handle,
- struct vxge_hw_fifo_attr *attr);
-
-static enum vxge_hw_status
-__vxge_hw_fifo_abort(
- struct __vxge_hw_fifo *fifoh);
-
-static enum vxge_hw_status
-__vxge_hw_fifo_reset(
- struct __vxge_hw_fifo *ringh);
+#include "vxge-main.h"
static enum vxge_hw_status
__vxge_hw_fifo_delete(
@@ -71,53 +59,15 @@ __vxge_hw_blockpool_free(struct __vxge_hw_device *hldev,
u32 size,
struct vxge_hw_mempool_dma *dma_object);
-
-static struct __vxge_hw_channel*
-__vxge_hw_channel_allocate(struct __vxge_hw_vpath_handle *vph,
- enum __vxge_hw_channel_type type, u32 length,
- u32 per_dtr_space, void *userdata);
-
static void
__vxge_hw_channel_free(
struct __vxge_hw_channel *channel);
-static enum vxge_hw_status
-__vxge_hw_channel_initialize(
- struct __vxge_hw_channel *channel);
-
-static enum vxge_hw_status
-__vxge_hw_channel_reset(
- struct __vxge_hw_channel *channel);
-
static enum vxge_hw_status __vxge_hw_ring_delete(struct __vxge_hw_vpath_handle *vp);
static enum vxge_hw_status
-__vxge_hw_device_fifo_config_check(struct vxge_hw_fifo_config *fifo_config);
-
-static enum vxge_hw_status
__vxge_hw_device_config_check(struct vxge_hw_device_config *new_config);
-static void
-__vxge_hw_device_id_get(struct __vxge_hw_device *hldev);
-
-static void
-__vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev);
-
-static enum vxge_hw_status
-__vxge_hw_vpath_card_info_get(
- u32 vp_id,
- struct vxge_hw_vpath_reg __iomem *vpath_reg,
- struct vxge_hw_device_hw_info *hw_info);
-
-static enum vxge_hw_status
-__vxge_hw_device_initialize(struct __vxge_hw_device *hldev);
-
-static void
-__vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev);
-
-static enum vxge_hw_status
-__vxge_hw_device_reg_addr_get(struct __vxge_hw_device *hldev);
-
static enum vxge_hw_status
__vxge_hw_device_register_poll(
void __iomem *reg,
@@ -138,9 +88,10 @@ __vxge_hw_pio_mem_write64(u64 val64, void __iomem *addr,
static struct vxge_hw_mempool*
__vxge_hw_mempool_create(struct __vxge_hw_device *devh, u32 memblock_size,
- u32 item_size, u32 private_size, u32 items_initial,
- u32 items_max, struct vxge_hw_mempool_cbs *mp_callback,
- void *userdata);
+ u32 item_size, u32 private_size, u32 items_initial,
+ u32 items_max, struct vxge_hw_mempool_cbs *mp_callback,
+ void *userdata);
+
static void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool);
static enum vxge_hw_status
@@ -153,52 +104,353 @@ vxge_hw_vpath_stats_enable(struct __vxge_hw_vpath_handle *vpath_handle);
static enum vxge_hw_status
__vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg);
-static u64
-__vxge_hw_vpath_pci_func_mode_get(u32 vp_id,
- struct vxge_hw_vpath_reg __iomem *vpath_reg);
-
-static u32
-__vxge_hw_vpath_func_id_get(u32 vp_id, struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg);
+static void
+__vxge_hw_vp_terminate(struct __vxge_hw_device *devh, u32 vp_id);
static enum vxge_hw_status
-__vxge_hw_vpath_addr_get(u32 vp_id, struct vxge_hw_vpath_reg __iomem *vpath_reg,
- u8 (macaddr)[ETH_ALEN], u8 (macaddr_mask)[ETH_ALEN]);
+__vxge_hw_vpath_xmac_tx_stats_get(struct __vxge_hw_virtualpath *vpath,
+ struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats);
static enum vxge_hw_status
-__vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath);
+__vxge_hw_vpath_xmac_rx_stats_get(struct __vxge_hw_virtualpath *vpath,
+ struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats);
+static void
+vxge_hw_vpath_set_zero_rx_frm_len(struct vxge_hw_vpath_reg __iomem *vp_reg)
+{
+ u64 val64;
-static enum vxge_hw_status
-__vxge_hw_vpath_sw_reset(struct __vxge_hw_device *devh, u32 vp_id);
+ val64 = readq(&vp_reg->rxmac_vcfg0);
+ val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff);
+ writeq(val64, &vp_reg->rxmac_vcfg0);
+ val64 = readq(&vp_reg->rxmac_vcfg0);
-static enum vxge_hw_status
-__vxge_hw_vpath_fw_ver_get(u32 vp_id, struct vxge_hw_vpath_reg __iomem *vpath_reg,
- struct vxge_hw_device_hw_info *hw_info);
+ return;
+}
-static enum vxge_hw_status
-__vxge_hw_vpath_mac_configure(struct __vxge_hw_device *devh, u32 vp_id);
+/*
+ * vxge_hw_vpath_wait_receive_idle - Wait for Rx to become idle
+ */
+int vxge_hw_vpath_wait_receive_idle(struct __vxge_hw_device *hldev, u32 vp_id)
+{
+ struct vxge_hw_vpath_reg __iomem *vp_reg;
+ struct __vxge_hw_virtualpath *vpath;
+ u64 val64, rxd_count, rxd_spat;
+ int count = 0, total_count = 0;
-static void
-__vxge_hw_vp_terminate(struct __vxge_hw_device *devh, u32 vp_id);
+ vpath = &hldev->virtual_paths[vp_id];
+ vp_reg = vpath->vp_reg;
-static enum vxge_hw_status
-__vxge_hw_vpath_stats_access(struct __vxge_hw_virtualpath *vpath,
- u32 operation, u32 offset, u64 *stat);
+ vxge_hw_vpath_set_zero_rx_frm_len(vp_reg);
-static enum vxge_hw_status
-__vxge_hw_vpath_xmac_tx_stats_get(struct __vxge_hw_virtualpath *vpath,
- struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats);
+ /* Check that the ring controller for this vpath has enough free RxDs
+ * to send frames to the host. This is done by reading the
+ * PRC_RXD_DOORBELL_VPn register and comparing the read value to the
+ * RXD_SPAT value for the vpath.
+ */
+ val64 = readq(&vp_reg->prc_cfg6);
+ rxd_spat = VXGE_HW_PRC_CFG6_GET_RXD_SPAT(val64) + 1;
+	/* Use a factor of 2 when comparing rxd_count against rxd_spat to
+	 * allow some headroom.
+	 */
+ rxd_spat *= 2;
+
+ do {
+ mdelay(1);
+
+ rxd_count = readq(&vp_reg->prc_rxd_doorbell);
+
+ /* Check that the ring controller for this vpath does
+ * not have any frame in its pipeline.
+ */
+ val64 = readq(&vp_reg->frm_in_progress_cnt);
+ if ((rxd_count <= rxd_spat) || (val64 > 0))
+ count = 0;
+ else
+ count++;
+ total_count++;
+ } while ((count < VXGE_HW_MIN_SUCCESSIVE_IDLE_COUNT) &&
+ (total_count < VXGE_HW_MAX_POLLING_COUNT));
+
+ if (total_count >= VXGE_HW_MAX_POLLING_COUNT)
+ printk(KERN_ALERT "%s: Still Receiving traffic. Abort wait\n",
+ __func__);
+
+ return total_count;
+}
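The loop above is a debounce poll: the vpath counts as idle only after VXGE_HW_MIN_SUCCESSIVE_IDLE_COUNT consecutive quiet samples, and any sign of activity resets the streak. The bare pattern, sketched with a hypothetical idle predicate:

static int poll_until_idle(bool (*is_idle)(void *ctx), void *ctx,
			   int needed, int max_polls)
{
	int streak = 0, polls = 0;

	while (streak < needed && polls < max_polls) {
		mdelay(1);
		streak = is_idle(ctx) ? streak + 1 : 0;	/* reset on activity */
		polls++;
	}
	return polls;	/* a return value >= max_polls means we gave up */
}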
+
+/* vxge_hw_device_wait_receive_idle - This function waits until all frames
+ * stored in the frame buffer for each vpath assigned to the given
+ * function (hldev) have been sent to the host.
+ */
+void vxge_hw_device_wait_receive_idle(struct __vxge_hw_device *hldev)
+{
+ int i, total_count = 0;
+
+ for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
+ if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
+ continue;
+
+ total_count += vxge_hw_vpath_wait_receive_idle(hldev, i);
+ if (total_count >= VXGE_HW_MAX_POLLING_COUNT)
+ break;
+ }
+}
static enum vxge_hw_status
-__vxge_hw_vpath_xmac_rx_stats_get(struct __vxge_hw_virtualpath *vpath,
- struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats);
+vxge_hw_vpath_fw_api(struct __vxge_hw_virtualpath *vpath, u32 action,
+ u32 fw_memo, u32 offset, u64 *data0, u64 *data1,
+ u64 *steer_ctrl)
+{
+ struct vxge_hw_vpath_reg __iomem *vp_reg;
+ enum vxge_hw_status status;
+ u64 val64;
+ u32 retry = 0, max_retry = 100;
+
+ vp_reg = vpath->vp_reg;
+
+ if (vpath->vp_open) {
+ max_retry = 3;
+ spin_lock(&vpath->lock);
+ }
+
+ writeq(*data0, &vp_reg->rts_access_steer_data0);
+ writeq(*data1, &vp_reg->rts_access_steer_data1);
+ wmb();
+
+ val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(action) |
+ VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(fw_memo) |
+ VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(offset) |
+ VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
+ *steer_ctrl;
+
+ status = __vxge_hw_pio_mem_write64(val64,
+ &vp_reg->rts_access_steer_ctrl,
+ VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
+ VXGE_HW_DEF_DEVICE_POLL_MILLIS);
+
+	/* The __vxge_hw_device_register_poll can udelay for a significant
+	 * amount of time, blocking other processes from the CPU. If it delays
+	 * for ~5secs, an NMI error can occur. A way around this is to give up
+	 * the processor via msleep, but that is not allowed while holding a
+	 * lock. So, only allow it to sleep for ~4secs if the vpath is open.
+	 * Otherwise, delay for 1sec and sleep for 10ms until the firmware
+	 * operation has completed or timed out.
+	 */
+ while ((status != VXGE_HW_OK) && retry++ < max_retry) {
+ if (!vpath->vp_open)
+ msleep(20);
+ status = __vxge_hw_device_register_poll(
+ &vp_reg->rts_access_steer_ctrl,
+ VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
+ VXGE_HW_DEF_DEVICE_POLL_MILLIS);
+ }
+
+ if (status != VXGE_HW_OK)
+ goto out;
+
+ val64 = readq(&vp_reg->rts_access_steer_ctrl);
+ if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
+ *data0 = readq(&vp_reg->rts_access_steer_data0);
+ *data1 = readq(&vp_reg->rts_access_steer_data1);
+ *steer_ctrl = val64;
+ } else
+ status = VXGE_HW_FAIL;
+
+out:
+ if (vpath->vp_open)
+ spin_unlock(&vpath->lock);
+ return status;
+}
+
+enum vxge_hw_status
+vxge_hw_upgrade_read_version(struct __vxge_hw_device *hldev, u32 *major,
+ u32 *minor, u32 *build)
+{
+ u64 data0 = 0, data1 = 0, steer_ctrl = 0;
+ struct __vxge_hw_virtualpath *vpath;
+ enum vxge_hw_status status;
+
+ vpath = &hldev->virtual_paths[hldev->first_vp_id];
+
+ status = vxge_hw_vpath_fw_api(vpath,
+ VXGE_HW_FW_UPGRADE_ACTION,
+ VXGE_HW_FW_UPGRADE_MEMO,
+ VXGE_HW_FW_UPGRADE_OFFSET_READ,
+ &data0, &data1, &steer_ctrl);
+ if (status != VXGE_HW_OK)
+ return status;
+
+ *major = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MAJOR(data0);
+ *minor = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MINOR(data0);
+ *build = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_BUILD(data0);
+
+ return status;
+}
+
+enum vxge_hw_status vxge_hw_flash_fw(struct __vxge_hw_device *hldev)
+{
+ u64 data0 = 0, data1 = 0, steer_ctrl = 0;
+ struct __vxge_hw_virtualpath *vpath;
+ enum vxge_hw_status status;
+ u32 ret;
+
+ vpath = &hldev->virtual_paths[hldev->first_vp_id];
+
+ status = vxge_hw_vpath_fw_api(vpath,
+ VXGE_HW_FW_UPGRADE_ACTION,
+ VXGE_HW_FW_UPGRADE_MEMO,
+ VXGE_HW_FW_UPGRADE_OFFSET_COMMIT,
+ &data0, &data1, &steer_ctrl);
+ if (status != VXGE_HW_OK) {
+ vxge_debug_init(VXGE_ERR, "%s: FW upgrade failed", __func__);
+ goto exit;
+ }
+
+ ret = VXGE_HW_RTS_ACCESS_STEER_CTRL_GET_ACTION(steer_ctrl) & 0x7F;
+ if (ret != 1) {
+ vxge_debug_init(VXGE_ERR, "%s: FW commit failed with error %d",
+ __func__, ret);
+ status = VXGE_HW_FAIL;
+ }
+
+exit:
+ return status;
+}
+
+enum vxge_hw_status
+vxge_update_fw_image(struct __vxge_hw_device *hldev, const u8 *fwdata, int size)
+{
+ u64 data0 = 0, data1 = 0, steer_ctrl = 0;
+ struct __vxge_hw_virtualpath *vpath;
+ enum vxge_hw_status status;
+ int ret_code, sec_code;
+
+ vpath = &hldev->virtual_paths[hldev->first_vp_id];
+
+ /* send upgrade start command */
+ status = vxge_hw_vpath_fw_api(vpath,
+ VXGE_HW_FW_UPGRADE_ACTION,
+ VXGE_HW_FW_UPGRADE_MEMO,
+ VXGE_HW_FW_UPGRADE_OFFSET_START,
+ &data0, &data1, &steer_ctrl);
+ if (status != VXGE_HW_OK) {
+ vxge_debug_init(VXGE_ERR, " %s: Upgrade start cmd failed",
+ __func__);
+ return status;
+ }
+
+ /* Transfer fw image to adapter 16 bytes at a time */
+ for (; size > 0; size -= VXGE_HW_FW_UPGRADE_BLK_SIZE) {
+ steer_ctrl = 0;
+
+		/* The next 128 bits of fwdata to be loaded onto the adapter */
+ data0 = *((u64 *)fwdata);
+ data1 = *((u64 *)fwdata + 1);
+
+ status = vxge_hw_vpath_fw_api(vpath,
+ VXGE_HW_FW_UPGRADE_ACTION,
+ VXGE_HW_FW_UPGRADE_MEMO,
+ VXGE_HW_FW_UPGRADE_OFFSET_SEND,
+ &data0, &data1, &steer_ctrl);
+ if (status != VXGE_HW_OK) {
+ vxge_debug_init(VXGE_ERR, "%s: Upgrade send failed",
+ __func__);
+ goto out;
+ }
+
+ ret_code = VXGE_HW_UPGRADE_GET_RET_ERR_CODE(data0);
+ switch (ret_code) {
+ case VXGE_HW_FW_UPGRADE_OK:
+ /* All OK, send next 16 bytes. */
+ break;
+ case VXGE_FW_UPGRADE_BYTES2SKIP:
+ /* skip bytes in the stream */
+ fwdata += (data0 >> 8) & 0xFFFFFFFF;
+ break;
+ case VXGE_HW_FW_UPGRADE_DONE:
+ goto out;
+ case VXGE_HW_FW_UPGRADE_ERR:
+ sec_code = VXGE_HW_UPGRADE_GET_SEC_ERR_CODE(data0);
+ switch (sec_code) {
+ case VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_1:
+ case VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_7:
+ printk(KERN_ERR
+ "corrupted data from .ncf file\n");
+ break;
+ case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_3:
+ case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_4:
+ case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_5:
+ case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_6:
+ case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_8:
+ printk(KERN_ERR "invalid .ncf file\n");
+ break;
+ case VXGE_HW_FW_UPGRADE_ERR_BUFFER_OVERFLOW:
+ printk(KERN_ERR "buffer overflow\n");
+ break;
+ case VXGE_HW_FW_UPGRADE_ERR_FAILED_TO_FLASH:
+ printk(KERN_ERR "failed to flash the image\n");
+ break;
+ case VXGE_HW_FW_UPGRADE_ERR_GENERIC_ERROR_UNKNOWN:
+ printk(KERN_ERR
+ "generic error. Unknown error type\n");
+ break;
+ default:
+ printk(KERN_ERR "Unknown error of type %d\n",
+ sec_code);
+ break;
+ }
+ status = VXGE_HW_FAIL;
+ goto out;
+ default:
+ printk(KERN_ERR "Unknown FW error: %d\n", ret_code);
+ status = VXGE_HW_FAIL;
+ goto out;
+ }
+ /* point to next 16 bytes */
+ fwdata += VXGE_HW_FW_UPGRADE_BLK_SIZE;
+ }
+out:
+ return status;
+}
+
+enum vxge_hw_status
+vxge_hw_vpath_eprom_img_ver_get(struct __vxge_hw_device *hldev,
+ struct eprom_image *img)
+{
+ u64 data0 = 0, data1 = 0, steer_ctrl = 0;
+ struct __vxge_hw_virtualpath *vpath;
+ enum vxge_hw_status status;
+ int i;
+
+ vpath = &hldev->virtual_paths[hldev->first_vp_id];
+
+ for (i = 0; i < VXGE_HW_MAX_ROM_IMAGES; i++) {
+ data0 = VXGE_HW_RTS_ACCESS_STEER_ROM_IMAGE_INDEX(i);
+ data1 = steer_ctrl = 0;
+
+ status = vxge_hw_vpath_fw_api(vpath,
+ VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
+ VXGE_HW_FW_API_GET_EPROM_REV,
+ 0, &data0, &data1, &steer_ctrl);
+ if (status != VXGE_HW_OK)
+ break;
+
+ img[i].is_valid = VXGE_HW_GET_EPROM_IMAGE_VALID(data0);
+ img[i].index = VXGE_HW_GET_EPROM_IMAGE_INDEX(data0);
+ img[i].type = VXGE_HW_GET_EPROM_IMAGE_TYPE(data0);
+ img[i].version = VXGE_HW_GET_EPROM_IMAGE_REV(data0);
+ }
+
+ return status;
+}
/*
* __vxge_hw_channel_allocate - Allocate memory for channel
* This function allocates required memory for the channel and various arrays
* in the channel
*/
-struct __vxge_hw_channel*
+static struct __vxge_hw_channel *
__vxge_hw_channel_allocate(struct __vxge_hw_vpath_handle *vph,
enum __vxge_hw_channel_type type,
u32 length, u32 per_dtr_space, void *userdata)
@@ -269,7 +521,7 @@ exit0:
* This function deallocates memory from the channel and various arrays
* in the channel
*/
-void __vxge_hw_channel_free(struct __vxge_hw_channel *channel)
+static void __vxge_hw_channel_free(struct __vxge_hw_channel *channel)
{
kfree(channel->work_arr);
kfree(channel->free_arr);
@@ -283,7 +535,7 @@ void __vxge_hw_channel_free(struct __vxge_hw_channel *channel)
* This function initializes a channel by properly setting the
* various references
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_channel_initialize(struct __vxge_hw_channel *channel)
{
u32 i;
@@ -318,7 +570,7 @@ __vxge_hw_channel_initialize(struct __vxge_hw_channel *channel)
* __vxge_hw_channel_reset - Resets a channel
* This function resets a channel by properly setting the various references
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_channel_reset(struct __vxge_hw_channel *channel)
{
u32 i;
@@ -345,8 +597,7 @@ __vxge_hw_channel_reset(struct __vxge_hw_channel *channel)
* Initialize certain PCI/PCI-X configuration registers
* with recommended values. Save config space for future hw resets.
*/
-void
-__vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev)
+static void __vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev)
{
u16 cmd = 0;
@@ -390,7 +641,7 @@ __vxge_hw_device_register_poll(void __iomem *reg, u64 mask, u32 max_millis)
return ret;
}
- /* __vxge_hw_device_vpath_reset_in_prog_check - Check if vpath reset
+/* __vxge_hw_device_vpath_reset_in_prog_check - Check if vpath reset
* in progress
* This routine checks the vpath reset in progress register is turned zero
*/
@@ -435,7 +686,7 @@ exit:
* register location pointers in the device object. It waits until the ric is
* completed initializing registers.
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_device_reg_addr_get(struct __vxge_hw_device *hldev)
{
u64 val64;
@@ -496,26 +747,6 @@ exit:
}
/*
- * __vxge_hw_device_id_get
- * This routine returns sets the device id and revision numbers into the device
- * structure
- */
-void __vxge_hw_device_id_get(struct __vxge_hw_device *hldev)
-{
- u64 val64;
-
- val64 = readq(&hldev->common_reg->titan_asic_id);
- hldev->device_id =
- (u16)VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_DEVICE_ID(val64);
-
- hldev->major_revision =
- (u8)VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_MAJOR_REVISION(val64);
-
- hldev->minor_revision =
- (u8)VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_MINOR_REVISION(val64);
-}
-
-/*
* __vxge_hw_device_access_rights_get: Get Access Rights of the driver
* This routine returns the Access Rights of the driver
*/
@@ -568,10 +799,25 @@ __vxge_hw_device_is_privilaged(u32 host_type, u32 func_id)
}
/*
+ * __vxge_hw_vpath_func_id_get - Get the function id of the vpath.
+ * Returns the function number of the vpath.
+ */
+static u32
+__vxge_hw_vpath_func_id_get(struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg)
+{
+ u64 val64;
+
+ val64 = readq(&vpmgmt_reg->vpath_to_func_map_cfg1);
+
+ return
+ (u32)VXGE_HW_VPATH_TO_FUNC_MAP_CFG1_GET_VPATH_TO_FUNC_MAP_CFG1(val64);
+}
+
+/*
* __vxge_hw_device_host_info_get
* This routine returns the host type assignments
*/
-void __vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev)
+static void __vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev)
{
u64 val64;
u32 i;
@@ -584,16 +830,18 @@ void __vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev)
hldev->vpath_assignments = readq(&hldev->common_reg->vpath_assignments);
for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
-
if (!(hldev->vpath_assignments & vxge_mBIT(i)))
continue;
hldev->func_id =
- __vxge_hw_vpath_func_id_get(i, hldev->vpmgmt_reg[i]);
+ __vxge_hw_vpath_func_id_get(hldev->vpmgmt_reg[i]);
hldev->access_rights = __vxge_hw_device_access_rights_get(
hldev->host_type, hldev->func_id);
+ hldev->virtual_paths[i].vp_open = VXGE_HW_VP_NOT_OPEN;
+ hldev->virtual_paths[i].vp_reg = hldev->vpath_reg[i];
+
hldev->first_vp_id = i;
break;
}
@@ -634,7 +882,8 @@ __vxge_hw_verify_pci_e_info(struct __vxge_hw_device *hldev)
* __vxge_hw_device_initialize
* Initialize Titan-V hardware.
*/
-enum vxge_hw_status __vxge_hw_device_initialize(struct __vxge_hw_device *hldev)
+static enum vxge_hw_status
+__vxge_hw_device_initialize(struct __vxge_hw_device *hldev)
{
enum vxge_hw_status status = VXGE_HW_OK;
@@ -650,6 +899,196 @@ exit:
return status;
}
+/*
+ * __vxge_hw_vpath_fw_ver_get - Get the fw version
+ * Returns FW Version
+ */
+static enum vxge_hw_status
+__vxge_hw_vpath_fw_ver_get(struct __vxge_hw_virtualpath *vpath,
+ struct vxge_hw_device_hw_info *hw_info)
+{
+ struct vxge_hw_device_version *fw_version = &hw_info->fw_version;
+ struct vxge_hw_device_date *fw_date = &hw_info->fw_date;
+ struct vxge_hw_device_version *flash_version = &hw_info->flash_version;
+ struct vxge_hw_device_date *flash_date = &hw_info->flash_date;
+ u64 data0, data1 = 0, steer_ctrl = 0;
+ enum vxge_hw_status status;
+
+ status = vxge_hw_vpath_fw_api(vpath,
+ VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY,
+ VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
+ 0, &data0, &data1, &steer_ctrl);
+ if (status != VXGE_HW_OK)
+ goto exit;
+
+ fw_date->day =
+ (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_DAY(data0);
+ fw_date->month =
+ (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MONTH(data0);
+ fw_date->year =
+ (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_YEAR(data0);
+
+ snprintf(fw_date->date, VXGE_HW_FW_STRLEN, "%2.2d/%2.2d/%4.4d",
+ fw_date->month, fw_date->day, fw_date->year);
+
+ fw_version->major =
+ (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MAJOR(data0);
+ fw_version->minor =
+ (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MINOR(data0);
+ fw_version->build =
+ (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_BUILD(data0);
+
+ snprintf(fw_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d",
+ fw_version->major, fw_version->minor, fw_version->build);
+
+ flash_date->day =
+ (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_DAY(data1);
+ flash_date->month =
+ (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MONTH(data1);
+ flash_date->year =
+ (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_YEAR(data1);
+
+ snprintf(flash_date->date, VXGE_HW_FW_STRLEN, "%2.2d/%2.2d/%4.4d",
+ flash_date->month, flash_date->day, flash_date->year);
+
+ flash_version->major =
+ (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MAJOR(data1);
+ flash_version->minor =
+ (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MINOR(data1);
+ flash_version->build =
+ (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_BUILD(data1);
+
+ snprintf(flash_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d",
+ flash_version->major, flash_version->minor,
+ flash_version->build);
+
+exit:
+ return status;
+}
+
+/*
+ * __vxge_hw_vpath_card_info_get - Get the serial numbers,
+ * part number and product description.
+ */
+static enum vxge_hw_status
+__vxge_hw_vpath_card_info_get(struct __vxge_hw_virtualpath *vpath,
+ struct vxge_hw_device_hw_info *hw_info)
+{
+ enum vxge_hw_status status;
+ u64 data0, data1 = 0, steer_ctrl = 0;
+ u8 *serial_number = hw_info->serial_number;
+ u8 *part_number = hw_info->part_number;
+ u8 *product_desc = hw_info->product_desc;
+ u32 i, j = 0;
+
+ data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_SERIAL_NUMBER;
+
+ status = vxge_hw_vpath_fw_api(vpath,
+ VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY,
+ VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
+ 0, &data0, &data1, &steer_ctrl);
+ if (status != VXGE_HW_OK)
+ return status;
+
+ ((u64 *)serial_number)[0] = be64_to_cpu(data0);
+ ((u64 *)serial_number)[1] = be64_to_cpu(data1);
+
+ data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PART_NUMBER;
+ data1 = steer_ctrl = 0;
+
+ status = vxge_hw_vpath_fw_api(vpath,
+ VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY,
+ VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
+ 0, &data0, &data1, &steer_ctrl);
+ if (status != VXGE_HW_OK)
+ return status;
+
+ ((u64 *)part_number)[0] = be64_to_cpu(data0);
+ ((u64 *)part_number)[1] = be64_to_cpu(data1);
+
+ for (i = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_0;
+ i <= VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_3; i++) {
+ data0 = i;
+ data1 = steer_ctrl = 0;
+
+ status = vxge_hw_vpath_fw_api(vpath,
+ VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY,
+ VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
+ 0, &data0, &data1, &steer_ctrl);
+ if (status != VXGE_HW_OK)
+ return status;
+
+ ((u64 *)product_desc)[j++] = be64_to_cpu(data0);
+ ((u64 *)product_desc)[j++] = be64_to_cpu(data1);
+ }
+
+ return status;
+}
+
+/*
+ * __vxge_hw_vpath_pci_func_mode_get - Get the pci mode
+ * Returns pci function mode
+ */
+static enum vxge_hw_status
+__vxge_hw_vpath_pci_func_mode_get(struct __vxge_hw_virtualpath *vpath,
+ struct vxge_hw_device_hw_info *hw_info)
+{
+ u64 data0, data1 = 0, steer_ctrl = 0;
+ enum vxge_hw_status status;
+
+ data0 = 0;
+
+ status = vxge_hw_vpath_fw_api(vpath,
+ VXGE_HW_FW_API_GET_FUNC_MODE,
+ VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
+ 0, &data0, &data1, &steer_ctrl);
+ if (status != VXGE_HW_OK)
+ return status;
+
+ hw_info->function_mode = VXGE_HW_GET_FUNC_MODE_VAL(data0);
+ return status;
+}
+
+/*
+ * __vxge_hw_vpath_addr_get - Get the hw address entry for this vpath
+ * from MAC address table.
+ */
+static enum vxge_hw_status
+__vxge_hw_vpath_addr_get(struct __vxge_hw_virtualpath *vpath,
+ u8 *macaddr, u8 *macaddr_mask)
+{
+ u64 action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
+ data0 = 0, data1 = 0, steer_ctrl = 0;
+ enum vxge_hw_status status;
+ int i;
+
+ do {
+ status = vxge_hw_vpath_fw_api(vpath, action,
+ VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
+ 0, &data0, &data1, &steer_ctrl);
+ if (status != VXGE_HW_OK)
+ goto exit;
+
+ data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data0);
+ data1 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(
+ data1);
+
+ for (i = ETH_ALEN; i > 0; i--) {
+ macaddr[i - 1] = (u8) (data0 & 0xFF);
+ data0 >>= 8;
+
+ macaddr_mask[i - 1] = (u8) (data1 & 0xFF);
+ data1 >>= 8;
+ }
+
+ action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY;
+ data0 = 0, data1 = 0, steer_ctrl = 0;
+
+ } while (!is_valid_ether_addr(macaddr));
+exit:
+ return status;
+}
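To trace the byte extraction above with a concrete value: if the station address sits in the low six bytes of data0 as 0x0000AABBCCDDEEFF, the loop peels one byte per pass from the least-significant end and fills the array from the tail:

/*
 *   pass 1: macaddr[5] = 0xFF	pass 4: macaddr[2] = 0xCC
 *   pass 2: macaddr[4] = 0xEE	pass 5: macaddr[1] = 0xBB
 *   pass 3: macaddr[3] = 0xDD	pass 6: macaddr[0] = 0xAA
 *
 * yielding AA:BB:CC:DD:EE:FF, with macaddr_mask unpacked from data1 the
 * same way.
 */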
+
/**
* vxge_hw_device_hw_info_get - Get the hw information
* Returns the vpath mask that has the bits set for each vpath allocated
@@ -665,9 +1104,9 @@ vxge_hw_device_hw_info_get(void __iomem *bar0,
struct vxge_hw_toc_reg __iomem *toc;
struct vxge_hw_mrpcim_reg __iomem *mrpcim_reg;
struct vxge_hw_common_reg __iomem *common_reg;
- struct vxge_hw_vpath_reg __iomem *vpath_reg;
struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg;
enum vxge_hw_status status;
+ struct __vxge_hw_virtualpath vpath;
memset(hw_info, 0, sizeof(struct vxge_hw_device_hw_info));
@@ -702,7 +1141,7 @@ vxge_hw_device_hw_info_get(void __iomem *bar0,
vpmgmt_reg = (struct vxge_hw_vpmgmt_reg __iomem *)
(bar0 + val64);
- hw_info->func_id = __vxge_hw_vpath_func_id_get(i, vpmgmt_reg);
+ hw_info->func_id = __vxge_hw_vpath_func_id_get(vpmgmt_reg);
if (__vxge_hw_device_access_rights_get(hw_info->host_type,
hw_info->func_id) &
VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM) {
@@ -718,16 +1157,19 @@ vxge_hw_device_hw_info_get(void __iomem *bar0,
val64 = readq(&toc->toc_vpath_pointer[i]);
- vpath_reg = (struct vxge_hw_vpath_reg __iomem *)(bar0 + val64);
+ vpath.vp_reg = (struct vxge_hw_vpath_reg __iomem *)
+ (bar0 + val64);
+ vpath.vp_open = 0;
- hw_info->function_mode =
- __vxge_hw_vpath_pci_func_mode_get(i, vpath_reg);
+ status = __vxge_hw_vpath_pci_func_mode_get(&vpath, hw_info);
+ if (status != VXGE_HW_OK)
+ goto exit;
- status = __vxge_hw_vpath_fw_ver_get(i, vpath_reg, hw_info);
+ status = __vxge_hw_vpath_fw_ver_get(&vpath, hw_info);
if (status != VXGE_HW_OK)
goto exit;
- status = __vxge_hw_vpath_card_info_get(i, vpath_reg, hw_info);
+ status = __vxge_hw_vpath_card_info_get(&vpath, hw_info);
if (status != VXGE_HW_OK)
goto exit;
@@ -735,14 +1177,15 @@ vxge_hw_device_hw_info_get(void __iomem *bar0,
}
for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
-
if (!((hw_info->vpath_mask) & vxge_mBIT(i)))
continue;
val64 = readq(&toc->toc_vpath_pointer[i]);
- vpath_reg = (struct vxge_hw_vpath_reg __iomem *)(bar0 + val64);
+ vpath.vp_reg = (struct vxge_hw_vpath_reg __iomem *)
+ (bar0 + val64);
+ vpath.vp_open = 0;
- status = __vxge_hw_vpath_addr_get(i, vpath_reg,
+ status = __vxge_hw_vpath_addr_get(&vpath,
hw_info->mac_addrs[i],
hw_info->mac_addr_masks[i]);
if (status != VXGE_HW_OK)
@@ -806,7 +1249,6 @@ vxge_hw_device_initialize(
vfree(hldev);
goto exit;
}
- __vxge_hw_device_id_get(hldev);
__vxge_hw_device_host_info_get(hldev);
@@ -814,7 +1256,6 @@ vxge_hw_device_initialize(
nblocks++;
for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
-
if (!(hldev->vpath_assignments & vxge_mBIT(i)))
continue;
@@ -839,7 +1280,6 @@ vxge_hw_device_initialize(
}
status = __vxge_hw_device_initialize(hldev);
-
if (status != VXGE_HW_OK) {
vxge_hw_device_terminate(hldev);
goto exit;
@@ -876,7 +1316,6 @@ vxge_hw_device_stats_get(struct __vxge_hw_device *hldev,
enum vxge_hw_status status = VXGE_HW_OK;
for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
-
if (!(hldev->vpaths_deployed & vxge_mBIT(i)) ||
(hldev->virtual_paths[i].vp_open ==
VXGE_HW_VP_NOT_OPEN))
@@ -1165,7 +1604,6 @@ exit:
* It can be used to set or reset Pause frame generation or reception
* support of the NIC.
*/
-
enum vxge_hw_status vxge_hw_device_setpause_data(struct __vxge_hw_device *hldev,
u32 port, u32 tx, u32 rx)
{
@@ -1409,7 +1847,6 @@ exit:
/*
* __vxge_hw_ring_create - Create a Ring
* This function creates Ring and initializes it.
- *
*/
static enum vxge_hw_status
__vxge_hw_ring_create(struct __vxge_hw_vpath_handle *vp,
@@ -1845,7 +2282,7 @@ static void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool)
* __vxge_hw_device_fifo_config_check - Check fifo configuration.
* Check the fifo configuration
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_device_fifo_config_check(struct vxge_hw_fifo_config *fifo_config)
{
if ((fifo_config->fifo_blocks < VXGE_HW_MIN_FIFO_BLOCKS) ||
@@ -1893,7 +2330,7 @@ __vxge_hw_device_vpath_config_check(struct vxge_hw_vp_config *vp_config)
* __vxge_hw_device_config_check - Check device configuration.
* Check the device configuration
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_device_config_check(struct vxge_hw_device_config *new_config)
{
u32 i;
@@ -2453,7 +2890,7 @@ __vxge_hw_fifo_mempool_item_alloc(
* __vxge_hw_fifo_create - Create a FIFO
* This function creates FIFO and initializes it.
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
struct vxge_hw_fifo_attr *attr)
{
@@ -2617,7 +3054,8 @@ static enum vxge_hw_status __vxge_hw_fifo_reset(struct __vxge_hw_fifo *fifo)
* __vxge_hw_fifo_delete - Removes the FIFO
* This function freeup the memory pool and removes the FIFO
*/
-enum vxge_hw_status __vxge_hw_fifo_delete(struct __vxge_hw_vpath_handle *vp)
+static enum vxge_hw_status
+__vxge_hw_fifo_delete(struct __vxge_hw_vpath_handle *vp)
{
struct __vxge_hw_fifo *fifo = vp->vpath->fifoh;
@@ -2675,297 +3113,6 @@ exit:
return status;
}
-/*
- * __vxge_hw_vpath_func_id_get - Get the function id of the vpath.
- * Returns the function number of the vpath.
- */
-static u32
-__vxge_hw_vpath_func_id_get(u32 vp_id,
- struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg)
-{
- u64 val64;
-
- val64 = readq(&vpmgmt_reg->vpath_to_func_map_cfg1);
-
- return
- (u32)VXGE_HW_VPATH_TO_FUNC_MAP_CFG1_GET_VPATH_TO_FUNC_MAP_CFG1(val64);
-}
-
-/*
- * __vxge_hw_read_rts_ds - Program RTS steering critieria
- */
-static inline void
-__vxge_hw_read_rts_ds(struct vxge_hw_vpath_reg __iomem *vpath_reg,
- u64 dta_struct_sel)
-{
- writeq(0, &vpath_reg->rts_access_steer_ctrl);
- wmb();
- writeq(dta_struct_sel, &vpath_reg->rts_access_steer_data0);
- writeq(0, &vpath_reg->rts_access_steer_data1);
- wmb();
-}
-
-
-/*
- * __vxge_hw_vpath_card_info_get - Get the serial numbers,
- * part number and product description.
- */
-static enum vxge_hw_status
-__vxge_hw_vpath_card_info_get(
- u32 vp_id,
- struct vxge_hw_vpath_reg __iomem *vpath_reg,
- struct vxge_hw_device_hw_info *hw_info)
-{
- u32 i, j;
- u64 val64;
- u64 data1 = 0ULL;
- u64 data2 = 0ULL;
- enum vxge_hw_status status = VXGE_HW_OK;
- u8 *serial_number = hw_info->serial_number;
- u8 *part_number = hw_info->part_number;
- u8 *product_desc = hw_info->product_desc;
-
- __vxge_hw_read_rts_ds(vpath_reg,
- VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_SERIAL_NUMBER);
-
- val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
- VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) |
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
- VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
- VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
-
- status = __vxge_hw_pio_mem_write64(val64,
- &vpath_reg->rts_access_steer_ctrl,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
- VXGE_HW_DEF_DEVICE_POLL_MILLIS);
-
- if (status != VXGE_HW_OK)
- return status;
-
- val64 = readq(&vpath_reg->rts_access_steer_ctrl);
-
- if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
- data1 = readq(&vpath_reg->rts_access_steer_data0);
- ((u64 *)serial_number)[0] = be64_to_cpu(data1);
-
- data2 = readq(&vpath_reg->rts_access_steer_data1);
- ((u64 *)serial_number)[1] = be64_to_cpu(data2);
- status = VXGE_HW_OK;
- } else
- *serial_number = 0;
-
- __vxge_hw_read_rts_ds(vpath_reg,
- VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PART_NUMBER);
-
- val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
- VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) |
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
- VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
- VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
-
- status = __vxge_hw_pio_mem_write64(val64,
- &vpath_reg->rts_access_steer_ctrl,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
- VXGE_HW_DEF_DEVICE_POLL_MILLIS);
-
- if (status != VXGE_HW_OK)
- return status;
-
- val64 = readq(&vpath_reg->rts_access_steer_ctrl);
-
- if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
-
- data1 = readq(&vpath_reg->rts_access_steer_data0);
- ((u64 *)part_number)[0] = be64_to_cpu(data1);
-
- data2 = readq(&vpath_reg->rts_access_steer_data1);
- ((u64 *)part_number)[1] = be64_to_cpu(data2);
-
- status = VXGE_HW_OK;
-
- } else
- *part_number = 0;
-
- j = 0;
-
- for (i = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_0;
- i <= VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_3; i++) {
-
- __vxge_hw_read_rts_ds(vpath_reg, i);
-
- val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
- VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) |
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
- VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
- VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
-
- status = __vxge_hw_pio_mem_write64(val64,
- &vpath_reg->rts_access_steer_ctrl,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
- VXGE_HW_DEF_DEVICE_POLL_MILLIS);
-
- if (status != VXGE_HW_OK)
- return status;
-
- val64 = readq(&vpath_reg->rts_access_steer_ctrl);
-
- if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
-
- data1 = readq(&vpath_reg->rts_access_steer_data0);
- ((u64 *)product_desc)[j++] = be64_to_cpu(data1);
-
- data2 = readq(&vpath_reg->rts_access_steer_data1);
- ((u64 *)product_desc)[j++] = be64_to_cpu(data2);
-
- status = VXGE_HW_OK;
- } else
- *product_desc = 0;
- }
-
- return status;
-}
-
-/*
- * __vxge_hw_vpath_fw_ver_get - Get the fw version
- * Returns FW Version
- */
-static enum vxge_hw_status
-__vxge_hw_vpath_fw_ver_get(
- u32 vp_id,
- struct vxge_hw_vpath_reg __iomem *vpath_reg,
- struct vxge_hw_device_hw_info *hw_info)
-{
- u64 val64;
- u64 data1 = 0ULL;
- u64 data2 = 0ULL;
- struct vxge_hw_device_version *fw_version = &hw_info->fw_version;
- struct vxge_hw_device_date *fw_date = &hw_info->fw_date;
- struct vxge_hw_device_version *flash_version = &hw_info->flash_version;
- struct vxge_hw_device_date *flash_date = &hw_info->flash_date;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
- VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY) |
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
- VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
- VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
-
- status = __vxge_hw_pio_mem_write64(val64,
- &vpath_reg->rts_access_steer_ctrl,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
- VXGE_HW_DEF_DEVICE_POLL_MILLIS);
-
- if (status != VXGE_HW_OK)
- goto exit;
-
- val64 = readq(&vpath_reg->rts_access_steer_ctrl);
-
- if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
-
- data1 = readq(&vpath_reg->rts_access_steer_data0);
- data2 = readq(&vpath_reg->rts_access_steer_data1);
-
- fw_date->day =
- (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_DAY(
- data1);
- fw_date->month =
- (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MONTH(
- data1);
- fw_date->year =
- (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_YEAR(
- data1);
-
- snprintf(fw_date->date, VXGE_HW_FW_STRLEN, "%2.2d/%2.2d/%4.4d",
- fw_date->month, fw_date->day, fw_date->year);
-
- fw_version->major =
- (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MAJOR(data1);
- fw_version->minor =
- (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MINOR(data1);
- fw_version->build =
- (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_BUILD(data1);
-
- snprintf(fw_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d",
- fw_version->major, fw_version->minor, fw_version->build);
-
- flash_date->day =
- (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_DAY(data2);
- flash_date->month =
- (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MONTH(data2);
- flash_date->year =
- (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_YEAR(data2);
-
- snprintf(flash_date->date, VXGE_HW_FW_STRLEN,
- "%2.2d/%2.2d/%4.4d",
- flash_date->month, flash_date->day, flash_date->year);
-
- flash_version->major =
- (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MAJOR(data2);
- flash_version->minor =
- (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MINOR(data2);
- flash_version->build =
- (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_BUILD(data2);
-
- snprintf(flash_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d",
- flash_version->major, flash_version->minor,
- flash_version->build);
-
- status = VXGE_HW_OK;
-
- } else
- status = VXGE_HW_FAIL;
-exit:
- return status;
-}
-
-/*
- * __vxge_hw_vpath_pci_func_mode_get - Get the pci mode
- * Returns pci function mode
- */
-static u64
-__vxge_hw_vpath_pci_func_mode_get(
- u32 vp_id,
- struct vxge_hw_vpath_reg __iomem *vpath_reg)
-{
- u64 val64;
- u64 data1 = 0ULL;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- __vxge_hw_read_rts_ds(vpath_reg,
- VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PCI_MODE);
-
- val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
- VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) |
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
- VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
- VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
-
- status = __vxge_hw_pio_mem_write64(val64,
- &vpath_reg->rts_access_steer_ctrl,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
- VXGE_HW_DEF_DEVICE_POLL_MILLIS);
-
- if (status != VXGE_HW_OK)
- goto exit;
-
- val64 = readq(&vpath_reg->rts_access_steer_ctrl);
-
- if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
- data1 = readq(&vpath_reg->rts_access_steer_data0);
- status = VXGE_HW_OK;
- } else {
- data1 = 0;
- status = VXGE_HW_FAIL;
- }
-exit:
- return data1;
-}
-
/**
* vxge_hw_device_flick_link_led - Flick (blink) link LED.
* @hldev: HW device.
@@ -2974,37 +3121,24 @@ exit:
* Flicker the link LED.
*/
enum vxge_hw_status
-vxge_hw_device_flick_link_led(struct __vxge_hw_device *hldev,
- u64 on_off)
+vxge_hw_device_flick_link_led(struct __vxge_hw_device *hldev, u64 on_off)
{
- u64 val64;
- enum vxge_hw_status status = VXGE_HW_OK;
- struct vxge_hw_vpath_reg __iomem *vp_reg;
+ struct __vxge_hw_virtualpath *vpath;
+ u64 data0, data1 = 0, steer_ctrl = 0;
+ enum vxge_hw_status status;
if (hldev == NULL) {
status = VXGE_HW_ERR_INVALID_DEVICE;
goto exit;
}
- vp_reg = hldev->vpath_reg[hldev->first_vp_id];
-
- writeq(0, &vp_reg->rts_access_steer_ctrl);
- wmb();
- writeq(on_off, &vp_reg->rts_access_steer_data0);
- writeq(0, &vp_reg->rts_access_steer_data1);
- wmb();
+ vpath = &hldev->virtual_paths[hldev->first_vp_id];
- val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
- VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LED_CONTROL) |
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
- VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
- VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
-
- status = __vxge_hw_pio_mem_write64(val64,
- &vp_reg->rts_access_steer_ctrl,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
- VXGE_HW_DEF_DEVICE_POLL_MILLIS);
+ data0 = on_off;
+ status = vxge_hw_vpath_fw_api(vpath,
+ VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LED_CONTROL,
+ VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
+ 0, &data0, &data1, &steer_ctrl);
exit:
return status;
}
@@ -3013,63 +3147,38 @@ exit:
* __vxge_hw_vpath_rts_table_get - Get the entries from RTS access tables
*/
enum vxge_hw_status
-__vxge_hw_vpath_rts_table_get(
- struct __vxge_hw_vpath_handle *vp,
- u32 action, u32 rts_table, u32 offset, u64 *data1, u64 *data2)
+__vxge_hw_vpath_rts_table_get(struct __vxge_hw_vpath_handle *vp,
+ u32 action, u32 rts_table, u32 offset,
+ u64 *data0, u64 *data1)
{
- u64 val64;
- struct __vxge_hw_virtualpath *vpath;
- struct vxge_hw_vpath_reg __iomem *vp_reg;
-
- enum vxge_hw_status status = VXGE_HW_OK;
+ enum vxge_hw_status status;
+ u64 steer_ctrl = 0;
if (vp == NULL) {
status = VXGE_HW_ERR_INVALID_HANDLE;
goto exit;
}
- vpath = vp->vpath;
- vp_reg = vpath->vp_reg;
-
- val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(action) |
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(rts_table) |
- VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
- VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(offset);
-
if ((rts_table ==
- VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT) ||
+ VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT) ||
(rts_table ==
- VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT) ||
+ VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT) ||
(rts_table ==
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MASK) ||
+ VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MASK) ||
(rts_table ==
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_KEY)) {
- val64 = val64 | VXGE_HW_RTS_ACCESS_STEER_CTRL_TABLE_SEL;
+ VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_KEY)) {
+ steer_ctrl = VXGE_HW_RTS_ACCESS_STEER_CTRL_TABLE_SEL;
}
- status = __vxge_hw_pio_mem_write64(val64,
- &vp_reg->rts_access_steer_ctrl,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
- vpath->hldev->config.device_poll_millis);
-
+ status = vxge_hw_vpath_fw_api(vp->vpath, action, rts_table, offset,
+ data0, data1, &steer_ctrl);
if (status != VXGE_HW_OK)
goto exit;
- val64 = readq(&vp_reg->rts_access_steer_ctrl);
-
- if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
-
- *data1 = readq(&vp_reg->rts_access_steer_data0);
-
- if ((rts_table ==
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) ||
- (rts_table ==
- VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT)) {
- *data2 = readq(&vp_reg->rts_access_steer_data1);
- }
- status = VXGE_HW_OK;
- } else
- status = VXGE_HW_FAIL;
+ /* Only the DA and RTH_MULTI_IT tables return a second data word;
+ * clear data1 for every other table. This must be a logical AND:
+ * with ||, the test is always true and data1 is always lost.
+ */
+ if ((rts_table != VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) &&
+ (rts_table !=
+ VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT))
+ *data1 = 0;
exit:
return status;
}
@@ -3078,107 +3187,27 @@ exit:
* __vxge_hw_vpath_rts_table_set - Set the entries of RTS access tables
*/
enum vxge_hw_status
-__vxge_hw_vpath_rts_table_set(
- struct __vxge_hw_vpath_handle *vp, u32 action, u32 rts_table,
- u32 offset, u64 data1, u64 data2)
+__vxge_hw_vpath_rts_table_set(struct __vxge_hw_vpath_handle *vp, u32 action,
+ u32 rts_table, u32 offset, u64 steer_data0,
+ u64 steer_data1)
{
- u64 val64;
- struct __vxge_hw_virtualpath *vpath;
- enum vxge_hw_status status = VXGE_HW_OK;
- struct vxge_hw_vpath_reg __iomem *vp_reg;
+ u64 data0, data1 = 0, steer_ctrl = 0;
+ enum vxge_hw_status status;
if (vp == NULL) {
status = VXGE_HW_ERR_INVALID_HANDLE;
goto exit;
}
- vpath = vp->vpath;
- vp_reg = vpath->vp_reg;
-
- writeq(data1, &vp_reg->rts_access_steer_data0);
- wmb();
+ data0 = steer_data0;
if ((rts_table == VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) ||
(rts_table ==
- VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT)) {
- writeq(data2, &vp_reg->rts_access_steer_data1);
- wmb();
- }
-
- val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(action) |
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(rts_table) |
- VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
- VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(offset);
-
- status = __vxge_hw_pio_mem_write64(val64,
- &vp_reg->rts_access_steer_ctrl,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
- vpath->hldev->config.device_poll_millis);
-
- if (status != VXGE_HW_OK)
- goto exit;
-
- val64 = readq(&vp_reg->rts_access_steer_ctrl);
-
- if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS)
- status = VXGE_HW_OK;
- else
- status = VXGE_HW_FAIL;
-exit:
- return status;
-}
-
-/*
- * __vxge_hw_vpath_addr_get - Get the hw address entry for this vpath
- * from MAC address table.
- */
-static enum vxge_hw_status
-__vxge_hw_vpath_addr_get(
- u32 vp_id, struct vxge_hw_vpath_reg __iomem *vpath_reg,
- u8 (macaddr)[ETH_ALEN], u8 (macaddr_mask)[ETH_ALEN])
-{
- u32 i;
- u64 val64;
- u64 data1 = 0ULL;
- u64 data2 = 0ULL;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
- VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY) |
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) |
- VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
- VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
-
- status = __vxge_hw_pio_mem_write64(val64,
- &vpath_reg->rts_access_steer_ctrl,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
- VXGE_HW_DEF_DEVICE_POLL_MILLIS);
-
- if (status != VXGE_HW_OK)
- goto exit;
-
- val64 = readq(&vpath_reg->rts_access_steer_ctrl);
-
- if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
+ VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT))
+ data1 = steer_data1;
- data1 = readq(&vpath_reg->rts_access_steer_data0);
- data2 = readq(&vpath_reg->rts_access_steer_data1);
-
- data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);
- data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(
- data2);
-
- for (i = ETH_ALEN; i > 0; i--) {
- macaddr[i-1] = (u8)(data1 & 0xFF);
- data1 >>= 8;
-
- macaddr_mask[i-1] = (u8)(data2 & 0xFF);
- data2 >>= 8;
- }
- status = VXGE_HW_OK;
- } else
- status = VXGE_HW_FAIL;
+ status = vxge_hw_vpath_fw_api(vp->vpath, action, rts_table, offset,
+ &data0, &data1, &steer_ctrl);
exit:
return status;
}
@@ -3204,6 +3233,8 @@ enum vxge_hw_status vxge_hw_vpath_rts_rth_set(
VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY,
VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG,
0, &data0, &data1);
+ if (status != VXGE_HW_OK)
+ goto exit;
data0 &= ~(VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_BUCKET_SIZE(0xf) |
VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL(0x3));
@@ -4117,6 +4148,7 @@ __vxge_hw_vp_initialize(struct __vxge_hw_device *hldev, u32 vp_id,
vpath = &hldev->virtual_paths[vp_id];
+ spin_lock_init(&hldev->virtual_paths[vp_id].lock);
vpath->vp_id = vp_id;
vpath->vp_open = VXGE_HW_VP_OPEN;
vpath->hldev = hldev;
@@ -4127,14 +4159,12 @@ __vxge_hw_vp_initialize(struct __vxge_hw_device *hldev, u32 vp_id,
__vxge_hw_vpath_reset(hldev, vp_id);
status = __vxge_hw_vpath_reset_check(vpath);
-
if (status != VXGE_HW_OK) {
memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
goto exit;
}
status = __vxge_hw_vpath_mgmt_read(hldev, vpath);
-
if (status != VXGE_HW_OK) {
memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
goto exit;
@@ -4148,7 +4178,6 @@ __vxge_hw_vp_initialize(struct __vxge_hw_device *hldev, u32 vp_id,
hldev->tim_int_mask1, vp_id);
status = __vxge_hw_vpath_initialize(hldev, vp_id);
-
if (status != VXGE_HW_OK)
__vxge_hw_vp_terminate(hldev, vp_id);
exit:
@@ -4335,16 +4364,18 @@ vpath_open_exit1:
void
vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp)
{
- struct __vxge_hw_virtualpath *vpath = NULL;
+ struct __vxge_hw_virtualpath *vpath = vp->vpath;
+ struct __vxge_hw_ring *ring = vpath->ringh;
+ struct vxgedev *vdev = netdev_priv(vpath->hldev->ndev);
u64 new_count, val64, val164;
- struct __vxge_hw_ring *ring;
- vpath = vp->vpath;
- ring = vpath->ringh;
+ if (vdev->titan1) {
+ new_count = readq(&vpath->vp_reg->rxdmem_size);
+ new_count &= 0x1fff;
+ } else
+ new_count = ring->config->ring_blocks * VXGE_HW_BLOCK_SIZE / 8;
- new_count = readq(&vpath->vp_reg->rxdmem_size);
- new_count &= 0x1fff;
- val164 = (VXGE_HW_RXDMEM_SIZE_PRC_RXDMEM_SIZE(new_count));
+ val164 = VXGE_HW_RXDMEM_SIZE_PRC_RXDMEM_SIZE(new_count);
writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(val164),
&vpath->vp_reg->prc_rxd_doorbell);
@@ -4414,7 +4445,9 @@ enum vxge_hw_status vxge_hw_vpath_close(struct __vxge_hw_vpath_handle *vp)
__vxge_hw_vp_terminate(devh, vp_id);
+ spin_lock(&vpath->lock);
vpath->vp_open = VXGE_HW_VP_NOT_OPEN;
+ spin_unlock(&vpath->lock);
vpath_close_exit:
return status;
@@ -4810,7 +4843,7 @@ static void vxge_os_dma_free(struct pci_dev *pdev, const void *vaddr,
* __vxge_hw_blockpool_create - Create block pool
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_blockpool_create(struct __vxge_hw_device *hldev,
struct __vxge_hw_blockpool *blockpool,
u32 pool_size,
@@ -4910,7 +4943,7 @@ blockpool_create_exit:
* __vxge_hw_blockpool_destroy - Deallocates the block pool
*/
-void __vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool *blockpool)
+static void __vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool *blockpool)
{
struct __vxge_hw_device *hldev;
@@ -5076,7 +5109,7 @@ exit:
* Allocates a block of memory of given size, either from block pool
* or by calling vxge_os_dma_malloc()
*/
-void *
+static void *
__vxge_hw_blockpool_malloc(struct __vxge_hw_device *devh, u32 size,
struct vxge_hw_mempool_dma *dma_object)
{
@@ -5140,7 +5173,7 @@ exit:
* __vxge_hw_blockpool_free - Frees the memory allcoated with
__vxge_hw_blockpool_malloc
*/
-void
+static void
__vxge_hw_blockpool_free(struct __vxge_hw_device *devh,
void *memblock, u32 size,
struct vxge_hw_mempool_dma *dma_object)
@@ -5192,7 +5225,7 @@ __vxge_hw_blockpool_free(struct __vxge_hw_device *devh,
* __vxge_hw_blockpool_block_allocate - Allocates a block from block pool
* This function allocates a block from block pool or from the system
*/
-struct __vxge_hw_blockpool_entry *
+static struct __vxge_hw_blockpool_entry *
__vxge_hw_blockpool_block_allocate(struct __vxge_hw_device *devh, u32 size)
{
struct __vxge_hw_blockpool_entry *entry = NULL;
@@ -5227,7 +5260,7 @@ __vxge_hw_blockpool_block_allocate(struct __vxge_hw_device *devh, u32 size)
*
* This function frees a block from block pool
*/
-void
+static void
__vxge_hw_blockpool_block_free(struct __vxge_hw_device *devh,
struct __vxge_hw_blockpool_entry *entry)
{
diff --git a/drivers/net/vxge/vxge-config.h b/drivers/net/vxge/vxge-config.h
index 5c00861b6c2c..5b2c8313426d 100644
--- a/drivers/net/vxge/vxge-config.h
+++ b/drivers/net/vxge/vxge-config.h
@@ -20,13 +20,6 @@
#define VXGE_CACHE_LINE_SIZE 128
#endif
-#define vxge_os_vaprintf(level, mask, fmt, ...) { \
- char buff[255]; \
- snprintf(buff, 255, fmt, __VA_ARGS__); \
- printk(buff); \
- printk("\n"); \
-}
-
#ifndef VXGE_ALIGN
#define VXGE_ALIGN(adrs, size) \
(((size) - (((u64)adrs) & ((size)-1))) & ((size)-1))
@@ -36,8 +29,16 @@
#define VXGE_HW_MAX_MTU 9600
#define VXGE_HW_DEFAULT_MTU 1500
-#ifdef VXGE_DEBUG_ASSERT
+#define VXGE_HW_MAX_ROM_IMAGES 8
+
+struct eprom_image {
+ u8 is_valid:1;
+ u8 index;
+ u8 type;
+ u16 version;
+};
+#ifdef VXGE_DEBUG_ASSERT
/**
* vxge_assert
* @test: C-condition to check
@@ -48,16 +49,13 @@
* compilation
* time.
*/
-#define vxge_assert(test) { \
- if (!(test)) \
- vxge_os_bug("bad cond: "#test" at %s:%d\n", \
- __FILE__, __LINE__); }
+#define vxge_assert(test) BUG_ON(!(test))
#else
#define vxge_assert(test)
#endif /* end of VXGE_DEBUG_ASSERT */
/**
- * enum enum vxge_debug_level
+ * enum vxge_debug_level
* @VXGE_NONE: debug disabled
* @VXGE_ERR: all errors going to be logged out
* @VXGE_TRACE: all errors plus all kind of verbose tracing print outs
@@ -159,6 +157,47 @@ enum vxge_hw_device_link_state {
};
/**
+ * enum vxge_hw_fw_upgrade_code - FW upgrade return codes.
+ * @VXGE_HW_FW_UPGRADE_OK: all OK, send the next 16 bytes
+ * @VXGE_HW_FW_UPGRADE_DONE: upload completed
+ * @VXGE_HW_FW_UPGRADE_ERR: upload error
+ * @VXGE_FW_UPGRADE_BYTES2SKIP: skip bytes in the stream
+ */
+enum vxge_hw_fw_upgrade_code {
+ VXGE_HW_FW_UPGRADE_OK = 0,
+ VXGE_HW_FW_UPGRADE_DONE = 1,
+ VXGE_HW_FW_UPGRADE_ERR = 2,
+ VXGE_FW_UPGRADE_BYTES2SKIP = 3
+};
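A caller streams the image to the adapter in 16-byte chunks and branches on these codes. A minimal standalone sketch of that loop, with the enum re-declared locally and demo_send16() standing in for the real per-chunk upload primitive (both invented for illustration):

#include <stdio.h>

/* Local mirror of the return codes above. */
enum upgrade_code { UP_OK, UP_DONE, UP_ERR, UP_SKIP };

static enum upgrade_code demo_send16(const unsigned char *chunk)
{
        (void)chunk;            /* stub: real hardware returns a code */
        return UP_DONE;
}

int main(void)
{
        unsigned char image[64] = { 0 };
        unsigned int off = 0;

        while (off < sizeof(image)) {
                switch (demo_send16(image + off)) {
                case UP_OK:             /* OK, send the next 16 bytes */
                case UP_SKIP:           /* firmware asks to skip ahead */
                        off += 16;
                        break;
                case UP_DONE:
                        puts("upload complete");
                        return 0;
                default:                /* UP_ERR */
                        puts("upload error");
                        return 1;
                }
        }
        return 0;
}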
+
+/**
+ * enum vxge_hw_fw_upgrade_err_code - FW upgrade error codes.
+ * @VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_1: corrupt data
+ * @VXGE_HW_FW_UPGRADE_ERR_BUFFER_OVERFLOW: buffer overflow
+ * @VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_3: invalid .ncf file
+ * @VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_4: invalid .ncf file
+ * @VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_5: invalid .ncf file
+ * @VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_6: invalid .ncf file
+ * @VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_7: corrupt data
+ * @VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_8: invalid .ncf file
+ * @VXGE_HW_FW_UPGRADE_ERR_GENERIC_ERROR_UNKNOWN: generic error of unknown type
+ * @VXGE_HW_FW_UPGRADE_ERR_FAILED_TO_FLASH: failed to flash the image
+ */
+enum vxge_hw_fw_upgrade_err_code {
+ VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_1 = 1,
+ VXGE_HW_FW_UPGRADE_ERR_BUFFER_OVERFLOW = 2,
+ VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_3 = 3,
+ VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_4 = 4,
+ VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_5 = 5,
+ VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_6 = 6,
+ VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_7 = 7,
+ VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_8 = 8,
+ VXGE_HW_FW_UPGRADE_ERR_GENERIC_ERROR_UNKNOWN = 9,
+ VXGE_HW_FW_UPGRADE_ERR_FAILED_TO_FLASH = 10
+};
+
+/**
* struct vxge_hw_device_date - Date Format
* @day: Day
* @month: Month
@@ -465,7 +504,6 @@ struct vxge_hw_device_config {
* See also: vxge_hw_driver_initialize().
*/
struct vxge_hw_uld_cbs {
-
void (*link_up)(struct __vxge_hw_device *devh);
void (*link_down)(struct __vxge_hw_device *devh);
void (*crit_err)(struct __vxge_hw_device *devh,
@@ -652,6 +690,7 @@ struct __vxge_hw_virtualpath {
struct vxge_hw_vpath_stats_hw_info *hw_stats;
struct vxge_hw_vpath_stats_hw_info *hw_stats_sav;
struct vxge_hw_vpath_stats_sw_info *sw_stats;
+ spinlock_t lock;
};
/*
@@ -674,9 +713,6 @@ struct __vxge_hw_vpath_handle{
/**
* struct __vxge_hw_device - Hal device object
* @magic: Magic Number
- * @device_id: PCI Device Id of the adapter
- * @major_revision: PCI Device major revision
- * @minor_revision: PCI Device minor revision
* @bar0: BAR0 virtual address.
* @pdev: Physical device handle
 * @config: Configuration passed by the LL driver at initialization
@@ -688,9 +724,6 @@ struct __vxge_hw_device {
u32 magic;
#define VXGE_HW_DEVICE_MAGIC 0x12345678
#define VXGE_HW_DEVICE_DEAD 0xDEADDEAD
- u16 device_id;
- u8 major_revision;
- u8 minor_revision;
void __iomem *bar0;
struct pci_dev *pdev;
struct net_device *ndev;
@@ -731,6 +764,7 @@ struct __vxge_hw_device {
u32 debug_level;
u32 level_err;
u32 level_trace;
+ u16 eprom_versions[VXGE_HW_MAX_ROM_IMAGES];
};
#define VXGE_HW_INFO_LEN 64
@@ -1413,12 +1447,12 @@ enum vxge_hw_rth_algoritms {
* See also: vxge_hw_vpath_rts_rth_set(), vxge_hw_vpath_rts_rth_get().
*/
struct vxge_hw_rth_hash_types {
- u8 hash_type_tcpipv4_en;
- u8 hash_type_ipv4_en;
- u8 hash_type_tcpipv6_en;
- u8 hash_type_ipv6_en;
- u8 hash_type_tcpipv6ex_en;
- u8 hash_type_ipv6ex_en;
+ u8 hash_type_tcpipv4_en:1,
+ hash_type_ipv4_en:1,
+ hash_type_tcpipv6_en:1,
+ hash_type_ipv6_en:1,
+ hash_type_tcpipv6ex_en:1,
+ hash_type_ipv6ex_en:1;
};
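Converting the six u8 flags to 1-bit fields shrinks the structure from six bytes to (typically) one. A quick standalone check:

#include <stdio.h>

struct old_types { unsigned char a, b, c, d, e, f; };
struct new_types { unsigned char a:1, b:1, c:1, d:1, e:1, f:1; };

int main(void)
{
        /* Typically prints "6 -> 1"; exact packing is ABI-dependent. */
        printf("%zu -> %zu\n", sizeof(struct old_types),
               sizeof(struct new_types));
        return 0;
}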
void vxge_hw_device_debug_set(
@@ -2000,7 +2034,7 @@ enum vxge_hw_status
vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask);
/**
- * vxge_debug
+ * vxge_debug_ll
* @level: level of debug verbosity.
* @mask: mask for the debug
* @buf: Circular buffer for tracing
@@ -2012,26 +2046,13 @@ vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask);
* may be compiled out if DEBUG macro was never defined.
* See also: enum vxge_debug_level{}.
*/
-
-#define vxge_trace_aux(level, mask, fmt, ...) \
-{\
- vxge_os_vaprintf(level, mask, fmt, __VA_ARGS__);\
-}
-
-#define vxge_debug(module, level, mask, fmt, ...) { \
-if ((level >= VXGE_TRACE && ((module & VXGE_DEBUG_TRACE_MASK) == module)) || \
- (level >= VXGE_ERR && ((module & VXGE_DEBUG_ERR_MASK) == module))) {\
- if ((mask & VXGE_DEBUG_MASK) == mask)\
- vxge_trace_aux(level, mask, fmt, __VA_ARGS__); \
-} \
-}
-
#if (VXGE_COMPONENT_LL & VXGE_DEBUG_MODULE_MASK)
-#define vxge_debug_ll(level, mask, fmt, ...) \
-{\
- vxge_debug(VXGE_COMPONENT_LL, level, mask, fmt, __VA_ARGS__);\
-}
-
+#define vxge_debug_ll(level, mask, fmt, ...) do { \
+ if ((level >= VXGE_ERR && VXGE_COMPONENT_LL & VXGE_DEBUG_ERR_MASK) || \
+ (level >= VXGE_TRACE && VXGE_COMPONENT_LL & VXGE_DEBUG_TRACE_MASK))\
+ if ((mask & VXGE_DEBUG_MASK) == mask) \
+ printk(fmt "\n", __VA_ARGS__); \
+} while (0)
#else
#define vxge_debug_ll(level, mask, fmt, ...)
#endif
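The rewritten vxge_debug_ll() folds the old vxge_trace_aux()/vxge_debug() layers into a single do { ... } while (0) statement, the wrapper that keeps a multi-statement macro safe as the body of an unbraced if/else. A simplified standalone demonstration (the macros here are stand-ins, not the kernel ones):

#include <stdio.h>

#define log_bad(msg)    printf("%s\n", msg); printf("done\n")
#define log_good(msg)   do { printf("%s\n", msg); printf("done\n"); } while (0)

int main(void)
{
        int err = 0;

        if (err)
                log_good("failed");     /* expands to one statement: safe */
        else
                printf("ok\n");

        /* Using log_bad() above instead would splice its second printf
         * outside the if, leaving the else with nothing to attach to. */
        return 0;
}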
@@ -2051,4 +2072,26 @@ enum vxge_hw_status vxge_hw_vpath_rts_rth_set(
enum vxge_hw_status
__vxge_hw_device_is_privilaged(u32 host_type, u32 func_id);
+
+#define VXGE_HW_MIN_SUCCESSIVE_IDLE_COUNT 5
+#define VXGE_HW_MAX_POLLING_COUNT 100
+
+void
+vxge_hw_device_wait_receive_idle(struct __vxge_hw_device *hldev);
+
+enum vxge_hw_status
+vxge_hw_upgrade_read_version(struct __vxge_hw_device *hldev, u32 *major,
+ u32 *minor, u32 *build);
+
+enum vxge_hw_status vxge_hw_flash_fw(struct __vxge_hw_device *hldev);
+
+enum vxge_hw_status
+vxge_update_fw_image(struct __vxge_hw_device *hldev, const u8 *filebuf,
+ int size);
+
+enum vxge_hw_status
+vxge_hw_vpath_eprom_img_ver_get(struct __vxge_hw_device *hldev,
+ struct eprom_image *eprom_image_data);
+
+int vxge_hw_vpath_wait_receive_idle(struct __vxge_hw_device *hldev, u32 vp_id);
#endif
diff --git a/drivers/net/vxge/vxge-ethtool.c b/drivers/net/vxge/vxge-ethtool.c
index b67746eef923..bc9bd1035706 100644
--- a/drivers/net/vxge/vxge-ethtool.c
+++ b/drivers/net/vxge/vxge-ethtool.c
@@ -11,7 +11,7 @@
* Virtualized Server Adapter.
* Copyright(c) 2002-2010 Exar Corp.
******************************************************************************/
-#include<linux/ethtool.h>
+#include <linux/ethtool.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/etherdevice.h>
@@ -29,7 +29,6 @@
* Return value:
* 0 on success.
*/
-
static int vxge_ethtool_sset(struct net_device *dev, struct ethtool_cmd *info)
{
/* We currently only support 10Gb/FULL */
@@ -79,10 +78,9 @@ static int vxge_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
 * Returns driver specific information like name, version etc. to ethtool.
*/
static void vxge_ethtool_gdrvinfo(struct net_device *dev,
- struct ethtool_drvinfo *info)
+ struct ethtool_drvinfo *info)
{
- struct vxgedev *vdev;
- vdev = (struct vxgedev *)netdev_priv(dev);
+ struct vxgedev *vdev = netdev_priv(dev);
strlcpy(info->driver, VXGE_DRIVER_NAME, sizeof(VXGE_DRIVER_NAME));
strlcpy(info->version, DRV_VERSION, sizeof(DRV_VERSION));
strlcpy(info->fw_version, vdev->fw_version, VXGE_HW_FW_STRLEN);
@@ -104,15 +102,14 @@ static void vxge_ethtool_gdrvinfo(struct net_device *dev,
* buffer area.
*/
static void vxge_ethtool_gregs(struct net_device *dev,
- struct ethtool_regs *regs, void *space)
+ struct ethtool_regs *regs, void *space)
{
int index, offset;
enum vxge_hw_status status;
u64 reg;
- u64 *reg_space = (u64 *) space;
- struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
- struct __vxge_hw_device *hldev = (struct __vxge_hw_device *)
- pci_get_drvdata(vdev->pdev);
+ u64 *reg_space = (u64 *)space;
+ struct vxgedev *vdev = netdev_priv(dev);
+ struct __vxge_hw_device *hldev = vdev->devh;
regs->len = sizeof(struct vxge_hw_vpath_reg) * vdev->no_of_vpath;
regs->version = vdev->pdev->subsystem_device;
@@ -147,9 +144,8 @@ static void vxge_ethtool_gregs(struct net_device *dev,
*/
static int vxge_ethtool_idnic(struct net_device *dev, u32 data)
{
- struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
- struct __vxge_hw_device *hldev = (struct __vxge_hw_device *)
- pci_get_drvdata(vdev->pdev);
+ struct vxgedev *vdev = netdev_priv(dev);
+ struct __vxge_hw_device *hldev = vdev->devh;
vxge_hw_device_flick_link_led(hldev, VXGE_FLICKER_ON);
msleep_interruptible(data ? (data * HZ) : VXGE_MAX_FLICKER_TIME);
@@ -168,11 +164,10 @@ static int vxge_ethtool_idnic(struct net_device *dev, u32 data)
* void
*/
static void vxge_ethtool_getpause_data(struct net_device *dev,
- struct ethtool_pauseparam *ep)
+ struct ethtool_pauseparam *ep)
{
- struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
- struct __vxge_hw_device *hldev = (struct __vxge_hw_device *)
- pci_get_drvdata(vdev->pdev);
+ struct vxgedev *vdev = netdev_priv(dev);
+ struct __vxge_hw_device *hldev = vdev->devh;
vxge_hw_device_getpause_data(hldev, 0, &ep->tx_pause, &ep->rx_pause);
}
@@ -188,11 +183,10 @@ static void vxge_ethtool_getpause_data(struct net_device *dev,
* int, returns 0 on Success
*/
static int vxge_ethtool_setpause_data(struct net_device *dev,
- struct ethtool_pauseparam *ep)
+ struct ethtool_pauseparam *ep)
{
- struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
- struct __vxge_hw_device *hldev = (struct __vxge_hw_device *)
- pci_get_drvdata(vdev->pdev);
+ struct vxgedev *vdev = netdev_priv(dev);
+ struct __vxge_hw_device *hldev = vdev->devh;
vxge_hw_device_setpause_data(hldev, 0, ep->tx_pause, ep->rx_pause);
@@ -209,9 +203,8 @@ static void vxge_get_ethtool_stats(struct net_device *dev,
enum vxge_hw_status status;
enum vxge_hw_status swstatus;
struct vxge_vpath *vpath = NULL;
-
- struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
- struct __vxge_hw_device *hldev = vdev->devh;
+ struct vxgedev *vdev = netdev_priv(dev);
+ struct __vxge_hw_device *hldev = vdev->devh;
struct vxge_hw_xmac_stats *xmac_stats;
struct vxge_hw_device_stats_sw_info *sw_stats;
struct vxge_hw_device_stats_hw_info *hw_stats;
@@ -574,12 +567,12 @@ static void vxge_get_ethtool_stats(struct net_device *dev,
kfree(hw_stats);
}
-static void vxge_ethtool_get_strings(struct net_device *dev,
- u32 stringset, u8 *data)
+static void vxge_ethtool_get_strings(struct net_device *dev, u32 stringset,
+ u8 *data)
{
int stat_size = 0;
int i, j;
- struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
+ struct vxgedev *vdev = netdev_priv(dev);
switch (stringset) {
case ETH_SS_STATS:
vxge_add_string("VPATH STATISTICS%s\t\t\t",
@@ -1066,21 +1059,21 @@ static void vxge_ethtool_get_strings(struct net_device *dev,
static int vxge_ethtool_get_regs_len(struct net_device *dev)
{
- struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
+ struct vxgedev *vdev = netdev_priv(dev);
return sizeof(struct vxge_hw_vpath_reg) * vdev->no_of_vpath;
}
static u32 vxge_get_rx_csum(struct net_device *dev)
{
- struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
+ struct vxgedev *vdev = netdev_priv(dev);
return vdev->rx_csum;
}
static int vxge_set_rx_csum(struct net_device *dev, u32 data)
{
- struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
+ struct vxgedev *vdev = netdev_priv(dev);
if (data)
vdev->rx_csum = 1;
@@ -1102,7 +1095,7 @@ static int vxge_ethtool_op_set_tso(struct net_device *dev, u32 data)
static int vxge_ethtool_get_sset_count(struct net_device *dev, int sset)
{
- struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
+ struct vxgedev *vdev = netdev_priv(dev);
switch (sset) {
case ETH_SS_STATS:
@@ -1119,6 +1112,59 @@ static int vxge_ethtool_get_sset_count(struct net_device *dev, int sset)
}
}
+static int vxge_set_flags(struct net_device *dev, u32 data)
+{
+ struct vxgedev *vdev = netdev_priv(dev);
+ enum vxge_hw_status status;
+
+ if (data & ~ETH_FLAG_RXHASH)
+ return -EOPNOTSUPP;
+
+ if (!!(data & ETH_FLAG_RXHASH) == vdev->devh->config.rth_en)
+ return 0;
+
+ /* Enabling RTH requires some of the logic in vxge_device_register and
+ * a vpath reset. Due to these restrictions, only allow modification
+ * while the interface is down.
+ */
+ if (netif_running(dev) || (vdev->config.rth_steering == NO_STEERING))
+ return -EINVAL;
+
+ vdev->devh->config.rth_en = !!(data & ETH_FLAG_RXHASH);
+
+ status = vxge_reset_all_vpaths(vdev);
+ if (status != VXGE_HW_OK) {
+ vdev->devh->config.rth_en = !vdev->devh->config.rth_en;
+ return -EFAULT;
+ }
+
+ if (vdev->devh->config.rth_en)
+ dev->features |= NETIF_F_RXHASH;
+ else
+ dev->features &= ~NETIF_F_RXHASH;
+
+ return 0;
+}
+
+static int vxge_fw_flash(struct net_device *dev, struct ethtool_flash *parms)
+{
+ struct vxgedev *vdev = netdev_priv(dev);
+
+ if (vdev->max_vpath_supported != VXGE_HW_MAX_VIRTUAL_PATHS) {
+ printk(KERN_INFO "Single Function Mode is required to flash the"
+ " firmware\n");
+ return -EINVAL;
+ }
+
+ if (netif_running(dev)) {
+ printk(KERN_INFO "Interface %s must be down to flash the "
+ "firmware\n", dev->name);
+ return -EBUSY;
+ }
+
+ return vxge_fw_upgrade(vdev, parms->data, 1);
+}
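For reference, the flash_device hook is reached from userspace through the ETHTOOL_FLASHDEV ioctl, and the kernel resolves the supplied name via request_firmware() against /lib/firmware. A minimal userspace sketch (the interface and firmware file names are hypothetical):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
        struct ethtool_flash eflash;
        struct ifreq ifr;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        memset(&eflash, 0, sizeof(eflash));
        eflash.cmd = ETHTOOL_FLASHDEV;
        strncpy(eflash.data, "vxge/fw.ncf", ETHTOOL_FLASH_MAX_FILENAME - 1);

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
        ifr.ifr_data = (void *)&eflash;

        if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
                perror("ETHTOOL_FLASHDEV");
        close(fd);
        return 0;
}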
+
static const struct ethtool_ops vxge_ethtool_ops = {
.get_settings = vxge_ethtool_gset,
.set_settings = vxge_ethtool_sset,
@@ -1140,6 +1186,8 @@ static const struct ethtool_ops vxge_ethtool_ops = {
.phys_id = vxge_ethtool_idnic,
.get_sset_count = vxge_ethtool_get_sset_count,
.get_ethtool_stats = vxge_get_ethtool_stats,
+ .set_flags = vxge_set_flags,
+ .flash_device = vxge_fw_flash,
};
void vxge_initialize_ethtool_ops(struct net_device *ndev)
diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
index 813829f3d024..5cba4a684f08 100644
--- a/drivers/net/vxge/vxge-main.c
+++ b/drivers/net/vxge/vxge-main.c
@@ -50,6 +50,8 @@
#include <net/ip.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
+#include <linux/firmware.h>
+#include <linux/net_tstamp.h>
#include "vxge-main.h"
#include "vxge-reg.h"
@@ -90,7 +92,6 @@ static int vxge_mac_list_add(struct vxge_vpath *vpath, struct macInfo *mac);
static int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac);
static enum vxge_hw_status vxge_restore_vpath_vid_table(struct vxge_vpath *vpath);
static enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath);
-static enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev);
static inline int is_vxge_card_up(struct vxgedev *vdev)
{
@@ -152,7 +153,7 @@ static void
vxge_callback_link_up(struct __vxge_hw_device *hldev)
{
struct net_device *dev = hldev->ndev;
- struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
+ struct vxgedev *vdev = netdev_priv(dev);
vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
vdev->ndev->name, __func__, __LINE__);
@@ -176,7 +177,7 @@ static void
vxge_callback_link_down(struct __vxge_hw_device *hldev)
{
struct net_device *dev = hldev->ndev;
- struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
+ struct vxgedev *vdev = netdev_priv(dev);
vxge_debug_entryexit(VXGE_TRACE,
"%s: %s:%d", vdev->ndev->name, __func__, __LINE__);
@@ -369,7 +370,7 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
u8 t_code, void *userdata)
{
struct vxge_ring *ring = (struct vxge_ring *)userdata;
- struct net_device *dev = ring->ndev;
+ struct net_device *dev = ring->ndev;
unsigned int dma_sizes;
void *first_dtr = NULL;
int dtr_cnt = 0;
@@ -513,6 +514,23 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
else
skb_checksum_none_assert(skb);
+
+ if (ring->rx_hwts) {
+ struct skb_shared_hwtstamps *skb_hwts;
+ u32 ns = *(u32 *)(skb->head + pkt_length);
+
+ skb_hwts = skb_hwtstamps(skb);
+ skb_hwts->hwtstamp = ns_to_ktime(ns);
+ skb_hwts->syststamp.tv64 = 0;
+ }
+
+ /* rth_hash_type and rth_it_hit can be non-zero regardless of
+ * whether RSS is enabled. Only rth_value is zero when RSS is
+ * disabled and non-zero when it is enabled, so key off of that.
+ */
+ if (ext_info.rth_value)
+ skb->rxhash = ext_info.rth_value;
+
vxge_rx_complete(ring, skb, ext_info.vlan,
pkt_length, &ext_info);
@@ -670,7 +688,7 @@ static int vxge_learn_mac(struct vxgedev *vdev, u8 *mac_header)
struct vxge_vpath *vpath = NULL;
struct __vxge_hw_device *hldev;
- hldev = (struct __vxge_hw_device *) pci_get_drvdata(vdev->pdev);
+ hldev = pci_get_drvdata(vdev->pdev);
mac_address = (u8 *)&mac_addr;
memcpy(mac_address, mac_header, ETH_ALEN);
@@ -769,7 +787,7 @@ vxge_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
- vdev = (struct vxgedev *)netdev_priv(dev);
+ vdev = netdev_priv(dev);
if (unlikely(!is_vxge_card_up(vdev))) {
vxge_debug_tx(VXGE_ERR,
@@ -1034,7 +1052,7 @@ static void vxge_set_multicast(struct net_device *dev)
vxge_debug_entryexit(VXGE_TRACE,
"%s:%d", __func__, __LINE__);
- vdev = (struct vxgedev *)netdev_priv(dev);
+ vdev = netdev_priv(dev);
hldev = (struct __vxge_hw_device *)vdev->devh;
if (unlikely(!is_vxge_card_up(vdev)))
@@ -1094,7 +1112,7 @@ static void vxge_set_multicast(struct net_device *dev)
/* Delete previous MC's */
for (i = 0; i < mcast_cnt; i++) {
list_for_each_safe(entry, next, list_head) {
- mac_entry = (struct vxge_mac_addrs *) entry;
+ mac_entry = (struct vxge_mac_addrs *)entry;
/* Copy the mac address to delete */
mac_address = (u8 *)&mac_entry->macaddr;
memcpy(mac_info.macaddr, mac_address, ETH_ALEN);
@@ -1137,7 +1155,7 @@ _set_all_mcast:
/* Delete previous MC's */
for (i = 0; i < mcast_cnt; i++) {
list_for_each_safe(entry, next, list_head) {
- mac_entry = (struct vxge_mac_addrs *) entry;
+ mac_entry = (struct vxge_mac_addrs *)entry;
/* Copy the mac address to delete */
mac_address = (u8 *)&mac_entry->macaddr;
memcpy(mac_info.macaddr, mac_address, ETH_ALEN);
@@ -1184,14 +1202,14 @@ static int vxge_set_mac_addr(struct net_device *dev, void *p)
{
struct sockaddr *addr = p;
struct vxgedev *vdev;
- struct __vxge_hw_device *hldev;
+ struct __vxge_hw_device *hldev;
enum vxge_hw_status status = VXGE_HW_OK;
struct macInfo mac_info_new, mac_info_old;
int vpath_idx = 0;
vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
- vdev = (struct vxgedev *)netdev_priv(dev);
+ vdev = netdev_priv(dev);
hldev = vdev->devh;
if (!is_valid_ether_addr(addr->sa_data))
@@ -1292,8 +1310,13 @@ static void vxge_vpath_intr_enable(struct vxgedev *vdev, int vp_id)
static void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id)
{
struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
+ struct __vxge_hw_device *hldev;
int msix_id;
+ hldev = pci_get_drvdata(vdev->pdev);
+
+ vxge_hw_vpath_wait_receive_idle(hldev, vpath->device_id);
+
vxge_hw_vpath_intr_disable(vpath->handle);
if (vdev->config.intr_type == INTA)
@@ -1423,6 +1446,7 @@ static int do_vxge_reset(struct vxgedev *vdev, int event)
}
if (event == VXGE_LL_FULL_RESET) {
+ vxge_hw_device_wait_receive_idle(vdev->devh);
vxge_hw_device_intr_disable(vdev->devh);
switch (vdev->cric_err_event) {
@@ -1608,8 +1632,7 @@ static int vxge_poll_inta(struct napi_struct *napi, int budget)
int budget_org = budget;
struct vxge_ring *ring;
- struct __vxge_hw_device *hldev = (struct __vxge_hw_device *)
- pci_get_drvdata(vdev->pdev);
+ struct __vxge_hw_device *hldev = pci_get_drvdata(vdev->pdev);
for (i = 0; i < vdev->no_of_vpath; i++) {
ring = &vdev->vpaths[i].ring;
@@ -1645,11 +1668,11 @@ static int vxge_poll_inta(struct napi_struct *napi, int budget)
*/
static void vxge_netpoll(struct net_device *dev)
{
- struct __vxge_hw_device *hldev;
+ struct __vxge_hw_device *hldev;
struct vxgedev *vdev;
- vdev = (struct vxgedev *)netdev_priv(dev);
- hldev = (struct __vxge_hw_device *)pci_get_drvdata(vdev->pdev);
+ vdev = netdev_priv(dev);
+ hldev = pci_get_drvdata(vdev->pdev);
vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
@@ -1689,15 +1712,6 @@ static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev)
mtable[index] = index % vdev->no_of_vpath;
}
- /* Fill RTH hash types */
- hash_types.hash_type_tcpipv4_en = vdev->config.rth_hash_type_tcpipv4;
- hash_types.hash_type_ipv4_en = vdev->config.rth_hash_type_ipv4;
- hash_types.hash_type_tcpipv6_en = vdev->config.rth_hash_type_tcpipv6;
- hash_types.hash_type_ipv6_en = vdev->config.rth_hash_type_ipv6;
- hash_types.hash_type_tcpipv6ex_en =
- vdev->config.rth_hash_type_tcpipv6ex;
- hash_types.hash_type_ipv6ex_en = vdev->config.rth_hash_type_ipv6ex;
-
/* set indirection table, bucket-to-vpath mapping */
status = vxge_hw_vpath_rts_rth_itable_set(vdev->vp_handles,
vdev->no_of_vpath,
@@ -1710,12 +1724,21 @@ static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev)
return status;
}
+ /* Fill RTH hash types */
+ hash_types.hash_type_tcpipv4_en = vdev->config.rth_hash_type_tcpipv4;
+ hash_types.hash_type_ipv4_en = vdev->config.rth_hash_type_ipv4;
+ hash_types.hash_type_tcpipv6_en = vdev->config.rth_hash_type_tcpipv6;
+ hash_types.hash_type_ipv6_en = vdev->config.rth_hash_type_ipv6;
+ hash_types.hash_type_tcpipv6ex_en =
+ vdev->config.rth_hash_type_tcpipv6ex;
+ hash_types.hash_type_ipv6ex_en = vdev->config.rth_hash_type_ipv6ex;
+
/*
- * Because the itable_set() method uses the active_table field
- * for the target virtual path the RTH config should be updated
- * for all VPATHs. The h/w only uses the lowest numbered VPATH
- * when steering frames.
- */
+ * Because the itable_set() method uses the active_table field
+ * for the target virtual path the RTH config should be updated
+ * for all VPATHs. The h/w only uses the lowest numbered VPATH
+ * when steering frames.
+ */
for (index = 0; index < vdev->no_of_vpath; index++) {
status = vxge_hw_vpath_rts_rth_set(
vdev->vpaths[index].handle,
@@ -1797,7 +1820,7 @@ static int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac)
{
struct list_head *entry, *next;
u64 del_mac = 0;
- u8 *mac_address = (u8 *) (&del_mac);
+ u8 *mac_address = (u8 *)(&del_mac);
/* Copy the mac address to delete from the list */
memcpy(mac_address, mac->macaddr, ETH_ALEN);
@@ -1928,7 +1951,7 @@ static enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath)
}
/* reset vpaths */
-static enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev)
+enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev)
{
enum vxge_hw_status status = VXGE_HW_OK;
struct vxge_vpath *vpath;
@@ -1988,8 +2011,23 @@ static int vxge_open_vpaths(struct vxgedev *vdev)
for (i = 0; i < vdev->no_of_vpath; i++) {
vpath = &vdev->vpaths[i];
-
vxge_assert(vpath->is_configured);
+
+ if (!vdev->titan1) {
+ struct vxge_hw_vp_config *vcfg;
+ vcfg = &vdev->devh->config.vp_config[vpath->device_id];
+
+ vcfg->rti.urange_a = RTI_T1A_RX_URANGE_A;
+ vcfg->rti.urange_b = RTI_T1A_RX_URANGE_B;
+ vcfg->rti.urange_c = RTI_T1A_RX_URANGE_C;
+ vcfg->tti.uec_a = TTI_T1A_TX_UFC_A;
+ vcfg->tti.uec_b = TTI_T1A_TX_UFC_B;
+ vcfg->tti.uec_c = TTI_T1A_TX_UFC_C(vdev->mtu);
+ vcfg->tti.uec_d = TTI_T1A_TX_UFC_D(vdev->mtu);
+ vcfg->tti.ltimer_val = VXGE_T1A_TTI_LTIMER_VAL;
+ vcfg->tti.rtimer_val = VXGE_T1A_TTI_RTIMER_VAL;
+ }
+
attr.vp_id = vpath->device_id;
attr.fifo_attr.callback = vxge_xmit_compl;
attr.fifo_attr.txdl_term = vxge_tx_term;
@@ -2024,6 +2062,7 @@ static int vxge_open_vpaths(struct vxgedev *vdev)
vdev->config.fifo_indicate_max_pkts;
vpath->ring.rx_vector_no = 0;
vpath->ring.rx_csum = vdev->rx_csum;
+ vpath->ring.rx_hwts = vdev->rx_hwts;
vpath->is_open = 1;
vdev->vp_handles[i] = vpath->handle;
vpath->ring.gro_enable = vdev->config.gro_enable;
@@ -2062,18 +2101,18 @@ static irqreturn_t vxge_isr_napi(int irq, void *dev_id)
struct __vxge_hw_device *hldev;
u64 reason;
enum vxge_hw_status status;
- struct vxgedev *vdev = (struct vxgedev *) dev_id;;
+ struct vxgedev *vdev = (struct vxgedev *)dev_id;
vxge_debug_intr(VXGE_TRACE, "%s:%d", __func__, __LINE__);
dev = vdev->ndev;
- hldev = (struct __vxge_hw_device *)pci_get_drvdata(vdev->pdev);
+ hldev = pci_get_drvdata(vdev->pdev);
if (pci_channel_offline(vdev->pdev))
return IRQ_NONE;
if (unlikely(!is_vxge_card_up(vdev)))
- return IRQ_NONE;
+ return IRQ_HANDLED;
status = vxge_hw_device_begin_irq(hldev, vdev->exec_mode,
&reason);
@@ -2301,8 +2340,8 @@ static void vxge_rem_msix_isr(struct vxgedev *vdev)
static void vxge_rem_isr(struct vxgedev *vdev)
{
- struct __vxge_hw_device *hldev;
- hldev = (struct __vxge_hw_device *) pci_get_drvdata(vdev->pdev);
+ struct __vxge_hw_device *hldev;
+ hldev = pci_get_drvdata(vdev->pdev);
#ifdef CONFIG_PCI_MSI
if (vdev->config.intr_type == MSI_X) {
@@ -2542,8 +2581,8 @@ vxge_open(struct net_device *dev)
vxge_debug_entryexit(VXGE_TRACE,
"%s: %s:%d", dev->name, __func__, __LINE__);
- vdev = (struct vxgedev *)netdev_priv(dev);
- hldev = (struct __vxge_hw_device *) pci_get_drvdata(vdev->pdev);
+ vdev = netdev_priv(dev);
+ hldev = pci_get_drvdata(vdev->pdev);
function_mode = vdev->config.device_hw_info.function_mode;
/* make sure you have link off by default every time Nic is
@@ -2598,6 +2637,8 @@ vxge_open(struct net_device *dev)
goto out2;
}
}
+ printk(KERN_INFO "%s: Receive Hashing Offload %s\n", dev->name,
+ hldev->config.rth_en ? "enabled" : "disabled");
for (i = 0; i < vdev->no_of_vpath; i++) {
vpath = &vdev->vpaths[i];
@@ -2683,9 +2724,10 @@ vxge_open(struct net_device *dev)
vxge_os_timer(vdev->vp_reset_timer,
vxge_poll_vp_reset, vdev, (HZ/2));
- if (vdev->vp_lockup_timer.function == NULL)
- vxge_os_timer(vdev->vp_lockup_timer,
- vxge_poll_vp_lockup, vdev, (HZ/2));
+ /* There is no need to check for RxD leak and RxD lookup on Titan1A */
+ if (vdev->titan1 && vdev->vp_lockup_timer.function == NULL)
+ vxge_os_timer(vdev->vp_lockup_timer, vxge_poll_vp_lockup, vdev,
+ HZ / 2);
set_bit(__VXGE_STATE_CARD_UP, &vdev->state);
@@ -2767,8 +2809,8 @@ static int do_vxge_close(struct net_device *dev, int do_io)
vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
dev->name, __func__, __LINE__);
- vdev = (struct vxgedev *)netdev_priv(dev);
- hldev = (struct __vxge_hw_device *) pci_get_drvdata(vdev->pdev);
+ vdev = netdev_priv(dev);
+ hldev = pci_get_drvdata(vdev->pdev);
if (unlikely(!is_vxge_card_up(vdev)))
return 0;
@@ -2778,7 +2820,6 @@ static int do_vxge_close(struct net_device *dev, int do_io)
while (test_and_set_bit(__VXGE_STATE_RESET_CARD, &vdev->state))
msleep(50);
- clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
if (do_io) {
/* Put the vpath back in normal mode */
vpath_vector = vxge_mBIT(vdev->vpaths[0].device_id);
@@ -2818,10 +2859,17 @@ static int do_vxge_close(struct net_device *dev, int do_io)
smp_wmb();
}
- del_timer_sync(&vdev->vp_lockup_timer);
+
+ if (vdev->titan1)
+ del_timer_sync(&vdev->vp_lockup_timer);
del_timer_sync(&vdev->vp_reset_timer);
+ if (do_io)
+ vxge_hw_device_wait_receive_idle(hldev);
+
+ clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
+
/* Disable napi */
if (vdev->config.intr_type != MSI_X)
napi_disable(&vdev->napi);
@@ -2838,8 +2886,6 @@ static int do_vxge_close(struct net_device *dev, int do_io)
if (do_io)
vxge_hw_device_intr_disable(vdev->devh);
- mdelay(1000);
-
vxge_rem_isr(vdev);
vxge_napi_del_all(vdev);
@@ -2954,6 +3000,101 @@ vxge_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
return net_stats;
}
+static enum vxge_hw_status vxge_timestamp_config(struct vxgedev *vdev,
+ int enable)
+{
+ enum vxge_hw_status status;
+ u64 val64;
+
+ /* The timestamp is passed to the driver in the FCS, so FCS stripping
+ * must be disabled on the adapter. Stripping is already disabled for
+ * the driver to load at all (due to a hardware bug), so nothing more
+ * needs to be done here.
+ */
+ if (enable)
+ val64 = VXGE_HW_XMAC_TIMESTAMP_EN |
+ VXGE_HW_XMAC_TIMESTAMP_USE_LINK_ID(0) |
+ VXGE_HW_XMAC_TIMESTAMP_INTERVAL(0);
+ else
+ val64 = 0;
+
+ status = vxge_hw_mgmt_reg_write(vdev->devh,
+ vxge_hw_mgmt_reg_type_mrpcim,
+ 0,
+ offsetof(struct vxge_hw_mrpcim_reg,
+ xmac_timestamp),
+ val64);
+ vxge_hw_device_flush_io(vdev->devh);
+ return status;
+}
+
+static int vxge_hwtstamp_ioctl(struct vxgedev *vdev, void __user *data)
+{
+ struct hwtstamp_config config;
+ enum vxge_hw_status status;
+ int i;
+
+ if (copy_from_user(&config, data, sizeof(config)))
+ return -EFAULT;
+
+ /* reserved for future extensions */
+ if (config.flags)
+ return -EINVAL;
+
+ /* Transmit HW Timestamp not supported */
+ switch (config.tx_type) {
+ case HWTSTAMP_TX_OFF:
+ break;
+ case HWTSTAMP_TX_ON:
+ default:
+ return -ERANGE;
+ }
+
+ switch (config.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ status = vxge_timestamp_config(vdev, 0);
+ if (status != VXGE_HW_OK)
+ return -EFAULT;
+
+ vdev->rx_hwts = 0;
+ config.rx_filter = HWTSTAMP_FILTER_NONE;
+ break;
+
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_SOME:
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ status = vxge_timestamp_config(vdev, 1);
+ if (status != VXGE_HW_OK)
+ return -EFAULT;
+
+ vdev->rx_hwts = 1;
+ config.rx_filter = HWTSTAMP_FILTER_ALL;
+ break;
+
+ default:
+ return -ERANGE;
+ }
+
+ for (i = 0; i < vdev->no_of_vpath; i++)
+ vdev->vpaths[i].ring.rx_hwts = vdev->rx_hwts;
+
+ if (copy_to_user(data, &config, sizeof(config)))
+ return -EFAULT;
+
+ return 0;
+}
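A userspace consumer would enable this path with the standard SIOCSHWTSTAMP ioctl and then read the timestamps placed in the skb's shared hwtstamps (e.g. via SO_TIMESTAMPING). A minimal sketch of the configuration step (the interface name "eth0" is an assumption, and CAP_NET_ADMIN is required):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(void)
{
        struct hwtstamp_config cfg;
        struct ifreq ifr;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        memset(&cfg, 0, sizeof(cfg));
        cfg.tx_type = HWTSTAMP_TX_OFF;          /* TX stamping unsupported */
        cfg.rx_filter = HWTSTAMP_FILTER_ALL;    /* driver coerces PTP to ALL */

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
        ifr.ifr_data = (void *)&cfg;

        if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
                perror("SIOCSHWTSTAMP");
        else
                printf("rx_filter granted: %d\n", cfg.rx_filter);

        close(fd);
        return 0;
}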
+
/**
* vxge_ioctl
* @dev: Device pointer.
@@ -2966,7 +3107,20 @@ vxge_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
*/
static int vxge_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
- return -EOPNOTSUPP;
+ struct vxgedev *vdev = netdev_priv(dev);
+ int ret;
+
+ switch (cmd) {
+ case SIOCSHWTSTAMP:
+ ret = vxge_hwtstamp_ioctl(vdev, rq->ifr_data);
+ if (ret)
+ return ret;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
}
/**
@@ -2984,7 +3138,7 @@ vxge_tx_watchdog(struct net_device *dev)
vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
- vdev = (struct vxgedev *)netdev_priv(dev);
+ vdev = netdev_priv(dev);
vdev->cric_err_event = VXGE_HW_EVENT_RESET_START;
@@ -3012,7 +3166,7 @@ vxge_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
- vdev = (struct vxgedev *)netdev_priv(dev);
+ vdev = netdev_priv(dev);
vpath = &vdev->vpaths[0];
if ((NULL == grp) && (vpath->is_open)) {
@@ -3061,7 +3215,7 @@ vxge_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
struct vxge_vpath *vpath;
int vp_id;
- vdev = (struct vxgedev *)netdev_priv(dev);
+ vdev = netdev_priv(dev);
/* Add these vlan to the vid table */
for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
@@ -3088,7 +3242,7 @@ vxge_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
- vdev = (struct vxgedev *)netdev_priv(dev);
+ vdev = netdev_priv(dev);
vlan_group_set_device(vdev->vlgrp, vid, NULL);
@@ -3125,6 +3279,19 @@ static const struct net_device_ops vxge_netdev_ops = {
#endif
};
+static int __devinit vxge_device_revision(struct vxgedev *vdev)
+{
+ int ret;
+ u8 revision;
+
+ ret = pci_read_config_byte(vdev->pdev, PCI_REVISION_ID, &revision);
+ if (ret)
+ return -EIO;
+
+ vdev->titan1 = (revision == VXGE_HW_TITAN1_PCI_REVISION);
+ return 0;
+}
+
static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
struct vxge_config *config,
int high_dma, int no_of_vpath,
@@ -3163,6 +3330,11 @@ static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
vdev->pdev = hldev->pdev;
memcpy(&vdev->config, config, sizeof(struct vxge_config));
vdev->rx_csum = 1; /* Enable Rx CSUM by default. */
+ vdev->rx_hwts = 0;
+
+ ret = vxge_device_revision(vdev);
+ if (ret < 0)
+ goto _out1;
SET_NETDEV_DEV(ndev, &vdev->pdev->dev);
@@ -3178,6 +3350,11 @@ static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
vxge_initialize_ethtool_ops(ndev);
+ if (vdev->config.rth_steering != NO_STEERING) {
+ ndev->features |= NETIF_F_RXHASH;
+ hldev->config.rth_en = VXGE_HW_RTH_ENABLE;
+ }
+
/* Allocate memory for vpath */
vdev->vpaths = kzalloc((sizeof(struct vxge_vpath)) *
no_of_vpath, GFP_KERNEL);
@@ -3227,6 +3404,7 @@ static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
"%s: Ethernet device registered",
ndev->name);
+ hldev->ndev = ndev;
*vdev_out = vdev;
/* Resetting the Device stats */
@@ -3261,36 +3439,29 @@ _out0:
*
* This function will unregister and free network device
*/
-static void
-vxge_device_unregister(struct __vxge_hw_device *hldev)
+static void vxge_device_unregister(struct __vxge_hw_device *hldev)
{
struct vxgedev *vdev;
struct net_device *dev;
char buf[IFNAMSIZ];
-#if ((VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) || \
- (VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK))
- u32 level_trace;
-#endif
dev = hldev->ndev;
vdev = netdev_priv(dev);
-#if ((VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) || \
- (VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK))
- level_trace = vdev->level_trace;
-#endif
- vxge_debug_entryexit(level_trace,
- "%s: %s:%d", vdev->ndev->name, __func__, __LINE__);
- memcpy(buf, vdev->ndev->name, IFNAMSIZ);
+ vxge_debug_entryexit(vdev->level_trace, "%s: %s:%d", vdev->ndev->name,
+ __func__, __LINE__);
+
+ memcpy(buf, dev->name, IFNAMSIZ);
/* in 2.6 will call stop() if device is up */
unregister_netdev(dev);
flush_scheduled_work();
- vxge_debug_init(level_trace, "%s: ethernet device unregistered", buf);
- vxge_debug_entryexit(level_trace,
- "%s: %s:%d Exiting...", buf, __func__, __LINE__);
+ vxge_debug_init(vdev->level_trace, "%s: ethernet device unregistered",
+ buf);
+ vxge_debug_entryexit(vdev->level_trace, "%s: %s:%d Exiting...", buf,
+ __func__, __LINE__);
}
/*
@@ -3304,7 +3475,7 @@ vxge_callback_crit_err(struct __vxge_hw_device *hldev,
enum vxge_hw_event type, u64 vp_id)
{
struct net_device *dev = hldev->ndev;
- struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
+ struct vxgedev *vdev = netdev_priv(dev);
struct vxge_vpath *vpath = NULL;
int vpath_idx;
@@ -3813,8 +3984,7 @@ static int vxge_pm_resume(struct pci_dev *pdev)
static pci_ers_result_t vxge_io_error_detected(struct pci_dev *pdev,
pci_channel_state_t state)
{
- struct __vxge_hw_device *hldev =
- (struct __vxge_hw_device *) pci_get_drvdata(pdev);
+ struct __vxge_hw_device *hldev = pci_get_drvdata(pdev);
struct net_device *netdev = hldev->ndev;
netif_device_detach(netdev);
@@ -3843,8 +4013,7 @@ static pci_ers_result_t vxge_io_error_detected(struct pci_dev *pdev,
*/
static pci_ers_result_t vxge_io_slot_reset(struct pci_dev *pdev)
{
- struct __vxge_hw_device *hldev =
- (struct __vxge_hw_device *) pci_get_drvdata(pdev);
+ struct __vxge_hw_device *hldev = pci_get_drvdata(pdev);
struct net_device *netdev = hldev->ndev;
struct vxgedev *vdev = netdev_priv(netdev);
@@ -3869,8 +4038,7 @@ static pci_ers_result_t vxge_io_slot_reset(struct pci_dev *pdev)
*/
static void vxge_io_resume(struct pci_dev *pdev)
{
- struct __vxge_hw_device *hldev =
- (struct __vxge_hw_device *) pci_get_drvdata(pdev);
+ struct __vxge_hw_device *hldev = pci_get_drvdata(pdev);
struct net_device *netdev = hldev->ndev;
if (netif_running(netdev)) {
@@ -3914,6 +4082,142 @@ static inline u32 vxge_get_num_vfs(u64 function_mode)
return num_functions;
}
+int vxge_fw_upgrade(struct vxgedev *vdev, char *fw_name, int override)
+{
+ struct __vxge_hw_device *hldev = vdev->devh;
+ u32 maj, min, bld, cmaj, cmin, cbld;
+ enum vxge_hw_status status;
+ const struct firmware *fw;
+ int ret;
+
+ ret = request_firmware(&fw, fw_name, &vdev->pdev->dev);
+ if (ret) {
+ vxge_debug_init(VXGE_ERR, "%s: Firmware file '%s' not found",
+ VXGE_DRIVER_NAME, fw_name);
+ goto out;
+ }
+
+ /* Load the new firmware onto the adapter */
+ status = vxge_update_fw_image(hldev, fw->data, fw->size);
+ if (status != VXGE_HW_OK) {
+ vxge_debug_init(VXGE_ERR,
+ "%s: FW image download to adapter failed '%s'.",
+ VXGE_DRIVER_NAME, fw_name);
+ ret = -EIO;
+ goto out;
+ }
+
+ /* Read the version of the new firmware */
+ status = vxge_hw_upgrade_read_version(hldev, &maj, &min, &bld);
+ if (status != VXGE_HW_OK) {
+ vxge_debug_init(VXGE_ERR,
+ "%s: Upgrade read version failed '%s'.",
+ VXGE_DRIVER_NAME, fw_name);
+ ret = -EIO;
+ goto out;
+ }
+
+ cmaj = vdev->config.device_hw_info.fw_version.major;
+ cmin = vdev->config.device_hw_info.fw_version.minor;
+ cbld = vdev->config.device_hw_info.fw_version.build;
+ /* It's possible the version in /lib/firmware is not the latest version.
+ * If so, we could get into a loop of trying to upgrade to the latest
+ * and flashing the older version.
+ */
+ if (VXGE_FW_VER(maj, min, bld) == VXGE_FW_VER(cmaj, cmin, cbld) &&
+ !override) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ printk(KERN_NOTICE "Upgrade to firmware version %d.%d.%d commencing\n",
+ maj, min, bld);
+
+ /* Flash the adapter with the new firmware */
+ status = vxge_hw_flash_fw(hldev);
+ if (status != VXGE_HW_OK) {
+ vxge_debug_init(VXGE_ERR, "%s: Upgrade commit failed '%s'.",
+ VXGE_DRIVER_NAME, fw_name);
+ ret = -EIO;
+ goto out;
+ }
+
+ printk(KERN_NOTICE "Upgrade of firmware successful! Adapter must be "
+ "hard reset before using, thus requiring a system reboot or a "
+ "hotplug event.\n");
+
+out:
+ return ret;
+}
+
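The override argument and the extern prototype added to vxge-main.h below
suggest vxge_fw_upgrade() is meant to be shared with an ethtool flash hook.
A hypothetical caller (the actual ethtool wiring is not in these hunks):

	static int vxge_flash_device_sketch(struct net_device *dev,
					    struct ethtool_flash *flash)
	{
		struct vxgedev *vdev = netdev_priv(dev);

		/* override = 1: flash even if the image already matches the
		 * running firmware version
		 */
		return vxge_fw_upgrade(vdev, flash->data, 1);
	}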
+static int vxge_probe_fw_update(struct vxgedev *vdev)
+{
+ u32 maj, min, bld;
+ int ret, gpxe = 0;
+ char *fw_name;
+
+ maj = vdev->config.device_hw_info.fw_version.major;
+ min = vdev->config.device_hw_info.fw_version.minor;
+ bld = vdev->config.device_hw_info.fw_version.build;
+
+ if (VXGE_FW_VER(maj, min, bld) == VXGE_CERT_FW_VER)
+ return 0;
+
+ /* Ignore the build number when determining if the current firmware is
+ * "too new" to load the driver
+ */
+ if (VXGE_FW_VER(maj, min, 0) > VXGE_CERT_FW_VER) {
+ vxge_debug_init(VXGE_ERR, "%s: Firmware newer than last known "
+ "version, unable to load driver\n",
+ VXGE_DRIVER_NAME);
+ return -EINVAL;
+ }
+
+ /* Firmware 1.4.4 and older cannot be upgraded, and is too ancient to
+ * work with this driver.
+ */
+ if (VXGE_FW_VER(maj, min, bld) <= VXGE_FW_DEAD_VER) {
+ vxge_debug_init(VXGE_ERR, "%s: Firmware %d.%d.%d cannot be "
+ "upgraded\n", VXGE_DRIVER_NAME, maj, min, bld);
+ return -EINVAL;
+ }
+
+	/* No image file was specified, so pick one: on firmware 1.6.1+ we can
+	 * read the EPROM versions and detect a gPXE image, in which case the
+	 * PXE-enabled firmware file is used.
+	 */
+ if (VXGE_FW_VER(maj, min, bld) >= VXGE_EPROM_FW_VER) {
+ int i;
+ for (i = 0; i < VXGE_HW_MAX_ROM_IMAGES; i++)
+ if (vdev->devh->eprom_versions[i]) {
+ gpxe = 1;
+ break;
+ }
+ }
+ if (gpxe)
+ fw_name = "vxge/X3fw-pxe.ncf";
+ else
+ fw_name = "vxge/X3fw.ncf";
+
+	ret = vxge_fw_upgrade(vdev, fw_name, 0);
+	/* -EINVAL and -ENOENT are not fatal errors for flashing firmware on
+	 * probe, so ignore them
+	 */
+	if (ret && ret != -EINVAL && ret != -ENOENT)
+		return -EIO;
+	ret = 0;
+
+ if (VXGE_FW_VER(VXGE_CERT_FW_VER_MAJOR, VXGE_CERT_FW_VER_MINOR, 0) >
+ VXGE_FW_VER(maj, min, 0)) {
+ vxge_debug_init(VXGE_ERR, "%s: Firmware %d.%d.%d is too old to"
+ " be used with this driver.\n"
+ "Please get the latest version from "
+ "ftp://ftp.s2io.com/pub/X3100-Drivers/FIRMWARE",
+ VXGE_DRIVER_NAME, maj, min, bld);
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
/**
* vxge_probe
* @pdev : structure containing the PCI related information of the device.
@@ -3928,7 +4232,7 @@ static inline u32 vxge_get_num_vfs(u64 function_mode)
static int __devinit
vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
{
- struct __vxge_hw_device *hldev;
+ struct __vxge_hw_device *hldev;
enum vxge_hw_status status;
int ret;
int high_dma = 0;
@@ -4072,16 +4376,6 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
goto _exit3;
}
- if (ll_config->device_hw_info.fw_version.major !=
- VXGE_DRIVER_FW_VERSION_MAJOR) {
- vxge_debug_init(VXGE_ERR,
- "%s: Incorrect firmware version."
- "Please upgrade the firmware to version 1.x.x",
- VXGE_DRIVER_NAME);
- ret = -EINVAL;
- goto _exit3;
- }
-
vpath_mask = ll_config->device_hw_info.vpath_mask;
if (vpath_mask == 0) {
vxge_debug_ll_config(VXGE_TRACE,
@@ -4145,11 +4439,37 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
goto _exit3;
}
+ if (VXGE_FW_VER(ll_config->device_hw_info.fw_version.major,
+ ll_config->device_hw_info.fw_version.minor,
+ ll_config->device_hw_info.fw_version.build) >=
+ VXGE_EPROM_FW_VER) {
+ struct eprom_image img[VXGE_HW_MAX_ROM_IMAGES];
+
+		status = vxge_hw_vpath_eprom_img_ver_get(hldev, img);
+		if (status != VXGE_HW_OK) {
+			vxge_debug_init(VXGE_ERR, "%s: Reading of EPROM failed",
+					VXGE_DRIVER_NAME);
+			/* Non-fatal, but don't report uninitialized versions */
+			memset(img, 0, sizeof(img));
+		}
+
+ for (i = 0; i < VXGE_HW_MAX_ROM_IMAGES; i++) {
+ hldev->eprom_versions[i] = img[i].version;
+ if (!img[i].is_valid)
+ break;
+ vxge_debug_init(VXGE_TRACE, "%s: EPROM %d, version "
+ "%d.%d.%d.%d\n", VXGE_DRIVER_NAME, i,
+ VXGE_EPROM_IMG_MAJOR(img[i].version),
+ VXGE_EPROM_IMG_MINOR(img[i].version),
+ VXGE_EPROM_IMG_FIX(img[i].version),
+ VXGE_EPROM_IMG_BUILD(img[i].version));
+ }
+ }
+
/* if FCS stripping is not disabled in MAC fail driver load */
- if (vxge_hw_vpath_strip_fcs_check(hldev, vpath_mask) != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR,
- "%s: FCS stripping is not disabled in MAC"
- " failing driver load", VXGE_DRIVER_NAME);
+ status = vxge_hw_vpath_strip_fcs_check(hldev, vpath_mask);
+ if (status != VXGE_HW_OK) {
+ vxge_debug_init(VXGE_ERR, "%s: FCS stripping is enabled in MAC"
+ " failing driver load", VXGE_DRIVER_NAME);
ret = -EINVAL;
goto _exit4;
}
@@ -4163,28 +4483,32 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
ll_config->fifo_indicate_max_pkts = VXGE_FIFO_INDICATE_MAX_PKTS;
ll_config->addr_learn_en = addr_learn_en;
ll_config->rth_algorithm = RTH_ALG_JENKINS;
- ll_config->rth_hash_type_tcpipv4 = VXGE_HW_RING_HASH_TYPE_TCP_IPV4;
- ll_config->rth_hash_type_ipv4 = VXGE_HW_RING_HASH_TYPE_NONE;
- ll_config->rth_hash_type_tcpipv6 = VXGE_HW_RING_HASH_TYPE_NONE;
- ll_config->rth_hash_type_ipv6 = VXGE_HW_RING_HASH_TYPE_NONE;
- ll_config->rth_hash_type_tcpipv6ex = VXGE_HW_RING_HASH_TYPE_NONE;
- ll_config->rth_hash_type_ipv6ex = VXGE_HW_RING_HASH_TYPE_NONE;
+ ll_config->rth_hash_type_tcpipv4 = 1;
+ ll_config->rth_hash_type_ipv4 = 0;
+ ll_config->rth_hash_type_tcpipv6 = 0;
+ ll_config->rth_hash_type_ipv6 = 0;
+ ll_config->rth_hash_type_tcpipv6ex = 0;
+ ll_config->rth_hash_type_ipv6ex = 0;
ll_config->rth_bkt_sz = RTH_BUCKET_SIZE;
ll_config->tx_pause_enable = VXGE_PAUSE_CTRL_ENABLE;
ll_config->rx_pause_enable = VXGE_PAUSE_CTRL_ENABLE;
- if (vxge_device_register(hldev, ll_config, high_dma, no_of_vpath,
- &vdev)) {
+ ret = vxge_device_register(hldev, ll_config, high_dma, no_of_vpath,
+ &vdev);
+ if (ret) {
ret = -EINVAL;
goto _exit4;
}
+ ret = vxge_probe_fw_update(vdev);
+ if (ret)
+ goto _exit5;
+
vxge_hw_device_debug_set(hldev, VXGE_TRACE, VXGE_COMPONENT_LL);
VXGE_COPY_DEBUG_INFO_TO_LL(vdev, vxge_hw_device_error_level_get(hldev),
vxge_hw_device_trace_level_get(hldev));
/* set private HW device info */
- hldev->ndev = vdev->ndev;
vdev->mtu = VXGE_HW_DEFAULT_MTU;
vdev->bar0 = attr.bar0;
vdev->max_vpath_supported = max_vpath_supported;
@@ -4286,7 +4610,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
"%s: mac_addr_list : memory allocation failed",
vdev->ndev->name);
ret = -EPERM;
- goto _exit5;
+ goto _exit6;
}
macaddr = (u8 *)&entry->macaddr;
memcpy(macaddr, vdev->ndev->dev_addr, ETH_ALEN);
@@ -4326,10 +4650,10 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
kfree(ll_config);
return 0;
-_exit5:
+_exit6:
for (i = 0; i < vdev->no_of_vpath; i++)
vxge_free_mac_add_list(&vdev->vpaths[i]);
-
+_exit5:
vxge_device_unregister(hldev);
_exit4:
pci_disable_sriov(pdev);
@@ -4354,34 +4678,25 @@ _exit0:
* Description: This function is called by the Pci subsystem to release a
* PCI device and free up all resource held up by the device.
*/
-static void __devexit
-vxge_remove(struct pci_dev *pdev)
+static void __devexit vxge_remove(struct pci_dev *pdev)
{
- struct __vxge_hw_device *hldev;
+ struct __vxge_hw_device *hldev;
struct vxgedev *vdev = NULL;
struct net_device *dev;
int i = 0;
-#if ((VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) || \
- (VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK))
- u32 level_trace;
-#endif
- hldev = (struct __vxge_hw_device *) pci_get_drvdata(pdev);
+ hldev = pci_get_drvdata(pdev);
if (hldev == NULL)
return;
+
dev = hldev->ndev;
vdev = netdev_priv(dev);
-#if ((VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) || \
- (VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK))
- level_trace = vdev->level_trace;
-#endif
- vxge_debug_entryexit(level_trace,
- "%s:%d", __func__, __LINE__);
+ vxge_debug_entryexit(vdev->level_trace, "%s:%d", __func__, __LINE__);
- vxge_debug_init(level_trace,
- "%s : removing PCI device...", __func__);
+ vxge_debug_init(vdev->level_trace, "%s : removing PCI device...",
+ __func__);
vxge_device_unregister(hldev);
for (i = 0; i < vdev->no_of_vpath; i++) {
@@ -4399,16 +4714,16 @@ vxge_remove(struct pci_dev *pdev)
/* we are safe to free it now */
free_netdev(dev);
- vxge_debug_init(level_trace,
- "%s:%d Device unregistered", __func__, __LINE__);
+ vxge_debug_init(vdev->level_trace, "%s:%d Device unregistered",
+ __func__, __LINE__);
vxge_hw_device_terminate(hldev);
pci_disable_device(pdev);
pci_release_regions(pdev);
pci_set_drvdata(pdev, NULL);
- vxge_debug_entryexit(level_trace,
- "%s:%d Exiting...", __func__, __LINE__);
+ vxge_debug_entryexit(vdev->level_trace, "%s:%d Exiting...", __func__,
+ __LINE__);
}
static struct pci_error_handlers vxge_err_handler = {
diff --git a/drivers/net/vxge/vxge-main.h b/drivers/net/vxge/vxge-main.h
index de64536cb7d0..953cb0ded3e1 100644
--- a/drivers/net/vxge/vxge-main.h
+++ b/drivers/net/vxge/vxge-main.h
@@ -29,6 +29,9 @@
#define PCI_DEVICE_ID_TITAN_WIN 0x5733
#define PCI_DEVICE_ID_TITAN_UNI 0x5833
+#define VXGE_HW_TITAN1_PCI_REVISION 1
+#define VXGE_HW_TITAN1A_PCI_REVISION 2
+
#define VXGE_USE_DEFAULT 0xffffffff
#define VXGE_HW_VPATH_MSIX_ACTIVE 4
#define VXGE_ALARM_MSIX_ID 2
@@ -53,11 +56,13 @@
#define VXGE_TTI_BTIMER_VAL 250000
-#define VXGE_TTI_LTIMER_VAL 1000
-#define VXGE_TTI_RTIMER_VAL 0
-#define VXGE_RTI_BTIMER_VAL 250
-#define VXGE_RTI_LTIMER_VAL 100
-#define VXGE_RTI_RTIMER_VAL 0
+#define VXGE_TTI_LTIMER_VAL 1000
+#define VXGE_T1A_TTI_LTIMER_VAL 80
+#define VXGE_TTI_RTIMER_VAL 0
+#define VXGE_T1A_TTI_RTIMER_VAL 400
+#define VXGE_RTI_BTIMER_VAL 250
+#define VXGE_RTI_LTIMER_VAL 100
+#define VXGE_RTI_RTIMER_VAL 0
#define VXGE_FIFO_INDICATE_MAX_PKTS VXGE_DEF_FIFO_LENGTH
#define VXGE_ISR_POLLING_CNT 8
#define VXGE_MAX_CONFIG_DEV 0xFF
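The T1A variants exist because Titan 1A silicon wants different
interrupt-moderation timers than Titan 1. Presumably the titan1 flag set by
vxge_device_revision() selects between them when the TTI configuration is
built, along these lines (illustrative; tti_cfg is a hypothetical name):

	tti_cfg->ltimer_val = vdev->titan1 ? VXGE_TTI_LTIMER_VAL :
					     VXGE_T1A_TTI_LTIMER_VAL;
	tti_cfg->rtimer_val = vdev->titan1 ? VXGE_TTI_RTIMER_VAL :
					     VXGE_T1A_TTI_RTIMER_VAL;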
@@ -76,14 +81,32 @@
#define TTI_TX_UFC_B 40
#define TTI_TX_UFC_C 60
#define TTI_TX_UFC_D 100
+#define TTI_T1A_TX_UFC_A 30
+#define TTI_T1A_TX_UFC_B 80
+/* UFC_C and UFC_D scale linearly with MTU:
+ * slope = (max_mtu - min_mtu) / (ufc at min mtu - ufc at max mtu)
+ * UFC_C: 60 at 9k MTU, 140 at 1.5k MTU => (9000 - 1500) / (140 - 60) ~= 93
+ */
+#define TTI_T1A_TX_UFC_C(mtu)	(60 + ((VXGE_HW_MAX_MTU - (mtu)) / 93))
+
+/* UFC_D: 100 at 9k MTU, 300 at 1.5k MTU => (9000 - 1500) / (300 - 100) ~= 37 */
+#define TTI_T1A_TX_UFC_D(mtu)	(100 + ((VXGE_HW_MAX_MTU - (mtu)) / 37))
+
+
+#define RTI_RX_URANGE_A 5
+#define RTI_RX_URANGE_B 15
+#define RTI_RX_URANGE_C 40
+#define RTI_T1A_RX_URANGE_A 1
+#define RTI_T1A_RX_URANGE_B 20
+#define RTI_T1A_RX_URANGE_C 50
+#define RTI_RX_UFC_A 1
+#define RTI_RX_UFC_B 5
+#define RTI_RX_UFC_C 10
+#define RTI_RX_UFC_D 15
+#define RTI_T1A_RX_UFC_B 20
+#define RTI_T1A_RX_UFC_C 50
+#define RTI_T1A_RX_UFC_D 60
-#define RTI_RX_URANGE_A 5
-#define RTI_RX_URANGE_B 15
-#define RTI_RX_URANGE_C 40
-#define RTI_RX_UFC_A 1
-#define RTI_RX_UFC_B 5
-#define RTI_RX_UFC_C 10
-#define RTI_RX_UFC_D 15
/* Milli secs timer period */
#define VXGE_TIMER_DELAY 10000
@@ -145,15 +168,15 @@ struct vxge_config {
int addr_learn_en;
- int rth_steering;
- int rth_algorithm;
- int rth_hash_type_tcpipv4;
- int rth_hash_type_ipv4;
- int rth_hash_type_tcpipv6;
- int rth_hash_type_ipv6;
- int rth_hash_type_tcpipv6ex;
- int rth_hash_type_ipv6ex;
- int rth_bkt_sz;
+ u32 rth_steering:2,
+ rth_algorithm:2,
+ rth_hash_type_tcpipv4:1,
+ rth_hash_type_ipv4:1,
+ rth_hash_type_tcpipv6:1,
+ rth_hash_type_ipv6:1,
+ rth_hash_type_tcpipv6ex:1,
+ rth_hash_type_ipv6ex:1,
+ rth_bkt_sz:8;
int rth_jhash_golden_ratio;
int tx_steering_type;
int fifo_indicate_max_pkts;
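The nine int fields collapse into 18 bits (2 + 2 + 6x1 + 8) of one u32,
trimming struct vxge_config by 32 bytes. A standalone check of the packing
(illustrative userspace code, not part of the driver):

	#include <assert.h>

	struct rth_cfg {
		unsigned int rth_steering:2,
			     rth_algorithm:2,
			     rth_hash_type_tcpipv4:1,
			     rth_hash_type_ipv4:1,
			     rth_hash_type_tcpipv6:1,
			     rth_hash_type_ipv6:1,
			     rth_hash_type_tcpipv6ex:1,
			     rth_hash_type_ipv6ex:1,
			     rth_bkt_sz:8;	/* 18 bits total */
	};

	int main(void)
	{
		/* all flags share one 4-byte word vs. 36 bytes as ints */
		assert(sizeof(struct rth_cfg) == sizeof(unsigned int));
		return 0;
	}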
@@ -248,8 +271,9 @@ struct vxge_ring {
*/
int driver_id;
- /* copy of the flag indicating whether rx_csum is to be used */
- u32 rx_csum;
+ /* copy of the flag indicating whether rx_csum is to be used */
+ u32 rx_csum:1,
+ rx_hwts:1;
int pkts_processed;
int budget;
@@ -327,7 +351,9 @@ struct vxgedev {
u16 all_multi_flg;
/* A flag indicating whether rx_csum is to be used or not. */
- u32 rx_csum;
+ u32 rx_csum:1,
+ rx_hwts:1,
+ titan1:1;
struct vxge_msix_entry *vxge_entries;
struct msix_entry *entries;
@@ -387,8 +413,6 @@ struct vxge_tx_priv {
static int p = val; \
module_param(p, int, 0)
-#define vxge_os_bug(fmt...) { printk(fmt); BUG(); }
-
#define vxge_os_timer(timer, handle, arg, exp) do { \
init_timer(&timer); \
timer.function = handle; \
@@ -397,6 +421,11 @@ struct vxge_tx_priv {
} while (0);
extern void vxge_initialize_ethtool_ops(struct net_device *ndev);
+
+enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev);
+
+int vxge_fw_upgrade(struct vxgedev *vdev, char *fw_name, int override);
+
/**
* #define VXGE_DEBUG_INIT: debug for initialization functions
* #define VXGE_DEBUG_TX : debug transmit related functions
diff --git a/drivers/net/vxge/vxge-reg.h b/drivers/net/vxge/vxge-reg.h
index 3dd5c9615ef9..3e658b175947 100644
--- a/drivers/net/vxge/vxge-reg.h
+++ b/drivers/net/vxge/vxge-reg.h
@@ -49,6 +49,33 @@
#define VXGE_HW_TITAN_VPMGMT_REG_SPACES 17
#define VXGE_HW_TITAN_VPATH_REG_SPACES 17
+#define VXGE_HW_FW_API_GET_EPROM_REV 31
+
+#define VXGE_EPROM_IMG_MAJOR(val) (u32) vxge_bVALn(val, 48, 4)
+#define VXGE_EPROM_IMG_MINOR(val) (u32) vxge_bVALn(val, 52, 4)
+#define VXGE_EPROM_IMG_FIX(val) (u32) vxge_bVALn(val, 56, 4)
+#define VXGE_EPROM_IMG_BUILD(val) (u32) vxge_bVALn(val, 60, 4)
+
+#define VXGE_HW_GET_EPROM_IMAGE_INDEX(val) vxge_bVALn(val, 16, 8)
+#define VXGE_HW_GET_EPROM_IMAGE_VALID(val) vxge_bVALn(val, 31, 1)
+#define VXGE_HW_GET_EPROM_IMAGE_TYPE(val) vxge_bVALn(val, 40, 8)
+#define VXGE_HW_GET_EPROM_IMAGE_REV(val) vxge_bVALn(val, 48, 16)
+#define VXGE_HW_RTS_ACCESS_STEER_ROM_IMAGE_INDEX(val) vxge_vBIT(val, 16, 8)
+
+#define VXGE_HW_FW_API_GET_FUNC_MODE 29
+#define VXGE_HW_GET_FUNC_MODE_VAL(val) (val & 0xFF)
+
+#define VXGE_HW_FW_UPGRADE_MEMO 13
+#define VXGE_HW_FW_UPGRADE_ACTION 16
+#define VXGE_HW_FW_UPGRADE_OFFSET_START 2
+#define VXGE_HW_FW_UPGRADE_OFFSET_SEND 3
+#define VXGE_HW_FW_UPGRADE_OFFSET_COMMIT 4
+#define VXGE_HW_FW_UPGRADE_OFFSET_READ 5
+
+#define VXGE_HW_FW_UPGRADE_BLK_SIZE 16
+#define VXGE_HW_UPGRADE_GET_RET_ERR_CODE(val) (val & 0xff)
+#define VXGE_HW_UPGRADE_GET_SEC_ERR_CODE(val) ((val >> 8) & 0xff)
+
#define VXGE_HW_ASIC_MODE_RESERVED 0
#define VXGE_HW_ASIC_MODE_NO_IOV 1
#define VXGE_HW_ASIC_MODE_SR_IOV 2
@@ -165,13 +192,13 @@
#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_ETYPE 2
#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_PN 3
#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG 5
-#define VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT 6
+#define VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT 6
#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_JHASH_CFG 7
#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MASK 8
#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_KEY 9
#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_QOS 10
#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DS 11
-#define VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT 12
+#define VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT 12
#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO 13
#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(bits) \
@@ -437,6 +464,7 @@
#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_BUILD(bits) \
vxge_bVALn(bits, 48, 16)
#define VXGE_HW_RTS_ACCESS_STEER_DATA1_FLASH_VER_BUILD vxge_vBIT(val, 48, 16)
+#define VXGE_HW_RTS_ACCESS_STEER_CTRL_GET_ACTION(bits) vxge_bVALn(bits, 0, 8)
#define VXGE_HW_SRPCIM_TO_VPATH_ALARM_REG_GET_PPIF_SRPCIM_TO_VPATH_ALARM(bits)\
vxge_bVALn(bits, 0, 18)
@@ -3998,6 +4026,7 @@ struct vxge_hw_vpath_reg {
#define VXGE_HW_PRC_CFG6_L4_CPC_TRSFR_CODE_EN vxge_mBIT(9)
#define VXGE_HW_PRC_CFG6_RXD_CRXDT(val) vxge_vBIT(val, 23, 9)
#define VXGE_HW_PRC_CFG6_RXD_SPAT(val) vxge_vBIT(val, 36, 9)
+#define VXGE_HW_PRC_CFG6_GET_RXD_SPAT(val) vxge_bVALn(val, 36, 9)
/*0x00a78*/ u64 prc_cfg7;
#define VXGE_HW_PRC_CFG7_SCATTER_MODE(val) vxge_vBIT(val, 6, 2)
#define VXGE_HW_PRC_CFG7_SMART_SCAT_EN vxge_mBIT(11)
diff --git a/drivers/net/vxge/vxge-traffic.h b/drivers/net/vxge/vxge-traffic.h
index 9890d4d596d0..1fceee876228 100644
--- a/drivers/net/vxge/vxge-traffic.h
+++ b/drivers/net/vxge/vxge-traffic.h
@@ -1904,34 +1904,6 @@ enum vxge_hw_ring_tcode {
VXGE_HW_RING_T_CODE_MULTI_ERR = 0xF
};
-/**
- * enum enum vxge_hw_ring_hash_type - RTH hash types
- * @VXGE_HW_RING_HASH_TYPE_NONE: No Hash
- * @VXGE_HW_RING_HASH_TYPE_TCP_IPV4: TCP IPv4
- * @VXGE_HW_RING_HASH_TYPE_UDP_IPV4: UDP IPv4
- * @VXGE_HW_RING_HASH_TYPE_IPV4: IPv4
- * @VXGE_HW_RING_HASH_TYPE_TCP_IPV6: TCP IPv6
- * @VXGE_HW_RING_HASH_TYPE_UDP_IPV6: UDP IPv6
- * @VXGE_HW_RING_HASH_TYPE_IPV6: IPv6
- * @VXGE_HW_RING_HASH_TYPE_TCP_IPV6_EX: TCP IPv6 extension
- * @VXGE_HW_RING_HASH_TYPE_UDP_IPV6_EX: UDP IPv6 extension
- * @VXGE_HW_RING_HASH_TYPE_IPV6_EX: IPv6 extension
- *
- * RTH hash types
- */
-enum vxge_hw_ring_hash_type {
- VXGE_HW_RING_HASH_TYPE_NONE = 0x0,
- VXGE_HW_RING_HASH_TYPE_TCP_IPV4 = 0x1,
- VXGE_HW_RING_HASH_TYPE_UDP_IPV4 = 0x2,
- VXGE_HW_RING_HASH_TYPE_IPV4 = 0x3,
- VXGE_HW_RING_HASH_TYPE_TCP_IPV6 = 0x4,
- VXGE_HW_RING_HASH_TYPE_UDP_IPV6 = 0x5,
- VXGE_HW_RING_HASH_TYPE_IPV6 = 0x6,
- VXGE_HW_RING_HASH_TYPE_TCP_IPV6_EX = 0x7,
- VXGE_HW_RING_HASH_TYPE_UDP_IPV6_EX = 0x8,
- VXGE_HW_RING_HASH_TYPE_IPV6_EX = 0x9
-};
-
enum vxge_hw_status vxge_hw_ring_rxd_reserve(
struct __vxge_hw_ring *ring_handle,
void **rxdh);
diff --git a/drivers/net/vxge/vxge-version.h b/drivers/net/vxge/vxge-version.h
index 53fefe137368..f05bb2f55e73 100644
--- a/drivers/net/vxge/vxge-version.h
+++ b/drivers/net/vxge/vxge-version.h
@@ -16,7 +16,34 @@
#define VXGE_VERSION_MAJOR "2"
#define VXGE_VERSION_MINOR "0"
-#define VXGE_VERSION_FIX "9"
-#define VXGE_VERSION_BUILD "20840"
+#define VXGE_VERSION_FIX "10"
+#define VXGE_VERSION_BUILD "21808"
#define VXGE_VERSION_FOR "k"
+
+#define VXGE_FW_VER(maj, min, bld) (((maj) << 16) + ((min) << 8) + (bld))
+
+#define VXGE_DEAD_FW_VER_MAJOR 1
+#define VXGE_DEAD_FW_VER_MINOR 4
+#define VXGE_DEAD_FW_VER_BUILD 4
+
+#define VXGE_FW_DEAD_VER VXGE_FW_VER(VXGE_DEAD_FW_VER_MAJOR, \
+ VXGE_DEAD_FW_VER_MINOR, \
+ VXGE_DEAD_FW_VER_BUILD)
+
+#define VXGE_EPROM_FW_VER_MAJOR 1
+#define VXGE_EPROM_FW_VER_MINOR 6
+#define VXGE_EPROM_FW_VER_BUILD 1
+
+#define VXGE_EPROM_FW_VER VXGE_FW_VER(VXGE_EPROM_FW_VER_MAJOR, \
+ VXGE_EPROM_FW_VER_MINOR, \
+ VXGE_EPROM_FW_VER_BUILD)
+
+#define VXGE_CERT_FW_VER_MAJOR 1
+#define VXGE_CERT_FW_VER_MINOR 8
+#define VXGE_CERT_FW_VER_BUILD 1
+
+#define VXGE_CERT_FW_VER VXGE_FW_VER(VXGE_CERT_FW_VER_MAJOR, \
+ VXGE_CERT_FW_VER_MINOR, \
+ VXGE_CERT_FW_VER_BUILD)
+
#endif
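VXGE_FW_VER() packs major/minor/build into a single integer so firmware
releases order under plain integer comparison, which is what the probe code
above relies on. A quick check of the arithmetic (illustrative only):

	#include <assert.h>

	#define VXGE_FW_VER(maj, min, bld) \
		(((maj) << 16) + ((min) << 8) + (bld))

	int main(void)
	{
		assert(VXGE_FW_VER(1, 8, 1) == 0x10801);  /* certified 1.8.1 */
		assert(VXGE_FW_VER(1, 8, 1) > VXGE_FW_VER(1, 6, 1));
		/* passing build = 0 ignores the build number, as probe does */
		assert(VXGE_FW_VER(1, 8, 0) > VXGE_FW_VER(1, 4, 4));
		return 0;
	}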
diff --git a/drivers/net/wireless/rt2x00/Kconfig b/drivers/net/wireless/rt2x00/Kconfig
index ade30251608e..6f383cd684b0 100644
--- a/drivers/net/wireless/rt2x00/Kconfig
+++ b/drivers/net/wireless/rt2x00/Kconfig
@@ -209,9 +209,6 @@ config RT2X00_LIB_LEDS
boolean
default y if (RT2X00_LIB=y && LEDS_CLASS=y) || (RT2X00_LIB=m && LEDS_CLASS!=n)
-comment "rt2x00 leds support disabled due to modularized LEDS_CLASS and built-in rt2x00"
- depends on RT2X00_LIB=y && LEDS_CLASS=m
-
config RT2X00_LIB_DEBUGFS
bool "Ralink debugfs support"
depends on RT2X00_LIB && MAC80211_DEBUGFS
diff --git a/drivers/net/xilinx_emaclite.c b/drivers/net/xilinx_emaclite.c
index 14f0955eca68..2de52d18152f 100644
--- a/drivers/net/xilinx_emaclite.c
+++ b/drivers/net/xilinx_emaclite.c
@@ -515,7 +515,7 @@ static void xemaclite_update_address(struct net_local *drvdata,
*/
static int xemaclite_set_mac_address(struct net_device *dev, void *address)
{
- struct net_local *lp = (struct net_local *) netdev_priv(dev);
+ struct net_local *lp = netdev_priv(dev);
struct sockaddr *addr = address;
if (netif_running(dev))
@@ -534,7 +534,7 @@ static int xemaclite_set_mac_address(struct net_device *dev, void *address)
*/
static void xemaclite_tx_timeout(struct net_device *dev)
{
- struct net_local *lp = (struct net_local *) netdev_priv(dev);
+ struct net_local *lp = netdev_priv(dev);
unsigned long flags;
dev_err(&lp->ndev->dev, "Exceeded transmit timeout of %lu ms\n",
@@ -578,7 +578,7 @@ static void xemaclite_tx_timeout(struct net_device *dev)
*/
static void xemaclite_tx_handler(struct net_device *dev)
{
- struct net_local *lp = (struct net_local *) netdev_priv(dev);
+ struct net_local *lp = netdev_priv(dev);
dev->stats.tx_packets++;
if (lp->deferred_skb) {
@@ -605,7 +605,7 @@ static void xemaclite_tx_handler(struct net_device *dev)
*/
static void xemaclite_rx_handler(struct net_device *dev)
{
- struct net_local *lp = (struct net_local *) netdev_priv(dev);
+ struct net_local *lp = netdev_priv(dev);
struct sk_buff *skb;
unsigned int align;
u32 len;
@@ -661,7 +661,7 @@ static irqreturn_t xemaclite_interrupt(int irq, void *dev_id)
{
bool tx_complete = 0;
struct net_device *dev = dev_id;
- struct net_local *lp = (struct net_local *) netdev_priv(dev);
+ struct net_local *lp = netdev_priv(dev);
void __iomem *base_addr = lp->base_addr;
u32 tx_status;
@@ -918,7 +918,7 @@ void xemaclite_adjust_link(struct net_device *ndev)
*/
static int xemaclite_open(struct net_device *dev)
{
- struct net_local *lp = (struct net_local *) netdev_priv(dev);
+ struct net_local *lp = netdev_priv(dev);
int retval;
/* Just to be safe, stop the device first */
@@ -987,7 +987,7 @@ static int xemaclite_open(struct net_device *dev)
*/
static int xemaclite_close(struct net_device *dev)
{
- struct net_local *lp = (struct net_local *) netdev_priv(dev);
+ struct net_local *lp = netdev_priv(dev);
netif_stop_queue(dev);
xemaclite_disable_interrupts(lp);
@@ -1031,7 +1031,7 @@ static struct net_device_stats *xemaclite_get_stats(struct net_device *dev)
*/
static int xemaclite_send(struct sk_buff *orig_skb, struct net_device *dev)
{
- struct net_local *lp = (struct net_local *) netdev_priv(dev);
+ struct net_local *lp = netdev_priv(dev);
struct sk_buff *new_skb;
unsigned int len;
unsigned long flags;
@@ -1068,7 +1068,7 @@ static int xemaclite_send(struct sk_buff *orig_skb, struct net_device *dev)
static void xemaclite_remove_ndev(struct net_device *ndev)
{
if (ndev) {
- struct net_local *lp = (struct net_local *) netdev_priv(ndev);
+ struct net_local *lp = netdev_priv(ndev);
if (lp->base_addr)
iounmap((void __iomem __force *) (lp->base_addr));
@@ -1245,7 +1245,7 @@ static int __devexit xemaclite_of_remove(struct platform_device *of_dev)
struct device *dev = &of_dev->dev;
struct net_device *ndev = dev_get_drvdata(dev);
- struct net_local *lp = (struct net_local *) netdev_priv(ndev);
+ struct net_local *lp = netdev_priv(ndev);
/* Un-register the mii_bus, if configured */
if (lp->has_mdio) {
diff --git a/drivers/net/znet.c b/drivers/net/znet.c
index c3a329204511..ae07b3dfbcc1 100644
--- a/drivers/net/znet.c
+++ b/drivers/net/znet.c
@@ -124,7 +124,7 @@ MODULE_LICENSE("GPL");
#define TX_BUF_SIZE 8192
#define DMA_BUF_SIZE (RX_BUF_SIZE + 16) /* 8k + 16 bytes for trailers */
-#define TX_TIMEOUT 10
+#define TX_TIMEOUT (HZ/10)
struct znet_private {
int rx_dma, tx_dma;
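The znet change fixes a units bug: the transmit watchdog timeout is measured
in jiffies, so a bare 10 meant 100 ms on a HZ=100 kernel but only 10 ms at
HZ=1000. Expressed as (HZ/10) it stays at roughly 100 ms for any tick rate
(assuming the usual assignment to dev->watchdog_timeo):

	/* watchdog period in wall-clock terms, for common HZ values:
	 *   HZ = 100  -> HZ/10 = 10 jiffies  = 100 ms
	 *   HZ = 250  -> HZ/10 = 25 jiffies  = 100 ms
	 *   HZ = 1000 -> HZ/10 = 100 jiffies = 100 ms
	 */
	dev->watchdog_timeo = TX_TIMEOUT;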